From 0cddc0ef3c8360d10d686aa7c94d33fd6b017e77 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 9 Mar 2020 23:21:49 +0300 Subject: [PATCH 001/769] Update LruBlockCache.java --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 27 ++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index f93a651f5433..9620348b9b30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -153,6 +153,9 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; + private static final String LRU_CACHE_DATA_BLOCK_PERCENT = "hbase.lru.cache.data.block.percent"; + private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 100; + /** * Defined the cache map as {@link ConcurrentHashMap} here, because in * {@link LruBlockCache#getBlock}, we need to guarantee the atomicity of map#computeIfPresent @@ -233,6 +236,9 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; + /** Percent of cached Data blocks */ + private final int cacheDataBlockPercent; + /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). @@ -260,7 +266,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { DEFAULT_MEMORY_FACTOR, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, - DEFAULT_MAX_BLOCK_SIZE); + DEFAULT_MAX_BLOCK_SIZE, + DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { @@ -276,7 +283,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), - conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -302,7 +310,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize) { + boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { @@ -338,6 +346,11 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, } else { this.evictionThread = null; } + + // check the bounds + cacheDataBlockPercent = cacheDataBlockPercent > 100 ? 100 : cacheDataBlockPercent; + this.cacheDataBlockPercent = cacheDataBlockPercent < 0 ? 0 : cacheDataBlockPercent; + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. 
this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, @@ -400,6 +413,14 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + // Don't cache this DATA block if we have limit on BlockCache, + // good for performance (HBASE-23887) + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; + } + } + if (buf.heapSize() > maxBlockSize) { // If there are a lot of blocks that are too // big this can make the logs way too noisy. From 7701647d653811174c4731768e3d9da66fdeddd0 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 9 Mar 2020 23:22:39 +0300 Subject: [PATCH 002/769] Update TestLruBlockCache.java --- .../hbase/io/hfile/TestLruBlockCache.java | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index af70f3db7cc4..96ba3a9e3592 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -342,7 +342,8 @@ public void testCacheEvictionThreePriorities() throws Exception { 0.34f, // memory 1.2f, // limit false, - 16 * 1024 * 1024); + 16 * 1024 * 1024, + 100); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -464,7 +465,8 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 0.5f, // memory 1.2f, // limit true, - 16 * 1024 * 1024); + 16 * 1024 * 1024, + 100); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -571,7 +573,8 @@ public void testScanResistance() throws Exception { 0.34f, // memory 1.2f, // limit false, - 16 * 1024 * 1024); + 16 * 1024 * 1024, + 100); CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -635,7 +638,8 @@ public void testMaxBlockSize() throws Exception { 0.34f, // memory 1.2f, // limit false, - 1024); + 1024, + 100); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -675,7 +679,8 @@ public void testResizeBlockCache() throws Exception { 0.34f, // memory 1.2f, // limit false, - 16 * 1024 * 1024); + 16 * 1024 * 1024, + 100); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -837,7 +842,8 @@ public void testCacheBlockNextBlockMetadataMissing() { 0.34f, // memory 1.2f, // limit false, - 1024); + 1024, + 100); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -1026,8 +1032,8 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.33f, // multi 0.34f, // memory 1.2f, // limit - false, 1024); + false, 1024, + 100); testMultiThreadGetAndEvictBlockInternal(cache); } } - From 5bae8c855f988010d290b5f6a49eb4b500f6fbde Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 07:36:45 
+0300 Subject: [PATCH 003/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 9620348b9b30..37d40e40ff5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -236,7 +236,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached Data blocks */ + /** Percent of cached data blocks */ private final int cacheDataBlockPercent; /** From eacb57abaf8d291b1aa75743ac386f7c684342d3 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 10:42:33 +0300 Subject: [PATCH 004/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 37d40e40ff5b..9620348b9b30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -236,7 +236,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached data blocks */ + /** Percent of cached Data blocks */ private final int cacheDataBlockPercent; /** From f813b5b42fd9c22eb890c2d8c0a41cb4d2408c5c Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 13:21:11 +0300 Subject: [PATCH 005/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 9620348b9b30..37d40e40ff5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -236,7 +236,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached Data blocks */ + /** Percent of cached data blocks */ private final int cacheDataBlockPercent; /** From 141b133530887acded34a135e032f55844ec0e33 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 14:55:09 +0300 Subject: [PATCH 006/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 37d40e40ff5b..9620348b9b30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -236,7 +236,7 @@ public class 
LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached data blocks */ + /** Percent of cached Data blocks */ private final int cacheDataBlockPercent; /** From 615cfa7161a68338e22ccf3f2a9d131d017f9f0f Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 17:37:49 +0300 Subject: [PATCH 007/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 9620348b9b30..37d40e40ff5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -236,7 +236,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached Data blocks */ + /** Percent of cached data blocks */ private final int cacheDataBlockPercent; /** From 909abc0b1b3915b3319da241199f31ce45f01b7b Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 3 May 2020 15:06:06 +0300 Subject: [PATCH 008/769] Update LruBlockCache.java Added parameters that help control the eviction process: hbase.lru.cache.heavy.eviction.count.limit - sets how many eviction runs in a row must be heavy before we stop putting some data blocks into the BlockCache; hbase.lru.cache.heavy.eviction.bytes.size.limit - sets how many bytes a run must evict for that run to count as heavy. By default, if 10 runs in a row (100 seconds) each evicted more than 10 MB, then we start to skip 50% of data blocks.
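For illustration, a minimal tuning sketch (hedged: the two property names are the ones added by this patch, and the constructor is the Configuration-based one from PATCH 001; the concrete values and the maxSize/blockSize variables are assumptions for the example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Trigger skip mode sooner (after 5 heavy runs) and only count bigger
    // evictions as heavy (20 MB freed per run).
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.lru.cache.heavy.eviction.count.limit", 5);
    conf.setInt("hbase.lru.cache.heavy.eviction.bytes.size.limit", 20 * 1024 * 1024);
    LruBlockCache cache = new LruBlockCache(maxSize, blockSize, true, conf);

The skip test itself is the offset check from PATCH 001: once the heavy-eviction counter passes the limit, a data block is dropped when cacheKey.getOffset() % 100 >= cacheDataBlockPercent. With the new default of 50, a block at offset 1057 (57 >= 50) is skipped while one at offset 1043 (43 < 50) is cached.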
--- .../hadoop/hbase/io/hfile/LruBlockCache.java | 65 +++++++++++++++---- 1 file changed, 51 insertions(+), 14 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 37d40e40ff5b..87a787014d20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -154,7 +154,15 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; private static final String LRU_CACHE_DATA_BLOCK_PERCENT = "hbase.lru.cache.data.block.percent"; - private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 100; + private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 50; + + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT + = "hbase.lru.cache.heavy.eviction.count.limit"; + private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 10; + + private static final String LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT + = "hbase.lru.cache.heavy.eviction.bytes.size.limit"; + private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT = 10 * 1024 * 1024; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in @@ -238,7 +246,16 @@ public class LruBlockCache implements FirstLevelBlockCache { /** Percent of cached data blocks */ private final int cacheDataBlockPercent; - + + /** Counter to control of eviction process */ + private static int heavyEvictionCount; + + /** Limit of count eviction process when start to avoid to cache blocks */ + private final int heavyEvictionCountLimit; + + /** Limit of volume eviction process when start to avoid to cache blocks */ + private static int heavyEvictionBytesSizeLimit; + /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). 
@@ -267,7 +284,9 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE, - DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT); + DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { @@ -284,7 +303,9 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), - conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT)); + conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -310,7 +331,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent) { + boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent, + int heavyEvictionCountLimit, int heavyEvictionBytesSizeLimit) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { @@ -350,6 +372,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, // check the bounds cacheDataBlockPercent = cacheDataBlockPercent > 100 ? 100 : cacheDataBlockPercent; this.cacheDataBlockPercent = cacheDataBlockPercent < 0 ? 0 : cacheDataBlockPercent; + this.heavyEvictionCountLimit = heavyEvictionCountLimit; + this.heavyEvictionBytesSizeLimit = heavyEvictionBytesSizeLimit; // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. @@ -413,11 +437,15 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { - if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { - // Don't cache this DATA block if we have limit on BlockCache, - // good for performance (HBASE-23887) - if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { - return; + + // Don't cache this DATA block when too many blocks evicted + // and if we have limit on percent of blocks to cache + // good for performance (HBASE-23887) + if (heavyEvictionCount > heavyEvictionCountLimit) { + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; + } } } @@ -676,10 +704,11 @@ long getOverhead() { /** * Eviction method. 
*/ - void evict() { + long evict() { // Ensure only one eviction at a time - if(!evictionLock.tryLock()) return; + if(!evictionLock.tryLock()) return 0; + long bytesToFree = 0L; try { evictionInProgress = true; @@ -692,7 +721,7 @@ void evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) return; + if (bytesToFree <= 0) return 0; // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); @@ -791,6 +820,7 @@ void evict() { stats.evict(); evictionInProgress = false; evictionLock.unlock(); + return bytesToFree; } } @@ -957,6 +987,8 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { enteringRun = true; + long bytesFreed; + heavyEvictionCount = 0; while (this.go) { synchronized (this) { try { @@ -968,7 +1000,12 @@ public void run() { } LruBlockCache cache = this.cache.get(); if (cache == null) break; - cache.evict(); + bytesFreed = cache.evict(); + // Control of heavy cleaning BlockCache + if (bytesFreed > 0 && bytesFreed > heavyEvictionBytesSizeLimit) + heavyEvictionCount++; + else + heavyEvictionCount = 0; } } From a855081b14f11e4d4fc5564fbbccf2f4181a96ca Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 3 May 2020 15:10:24 +0300 Subject: [PATCH 009/769] Update TestLruBlockCache.java Fixed tests --- .../hbase/io/hfile/TestLruBlockCache.java | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 96ba3a9e3592..8d24f0205a0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -343,7 +343,9 @@ public void testCacheEvictionThreePriorities() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -466,7 +468,9 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 1.2f, // limit true, 16 * 1024 * 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -574,7 +578,10 @@ public void testScanResistance() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); + CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -639,7 +646,9 @@ public void testMaxBlockSize() throws Exception { 1.2f, // limit false, 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -680,7 +689,9 @@ public void testResizeBlockCache() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -843,7 +854,9 @@ public void testCacheBlockNextBlockMetadataMissing() { 1.2f, // limit false, 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); BlockCacheKey key 
= new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -1033,7 +1046,9 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.34f, // memory 1.2f, // limit false, 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); testMultiThreadGetAndEvictBlockInternal(cache); } } From 3a8d9a1cc2aa20a66db33c7cb2b89cb4ac5ef231 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 3 May 2020 16:59:14 +0300 Subject: [PATCH 010/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 87a787014d20..e273c5a6ec9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -713,7 +713,7 @@ long evict() { try { evictionInProgress = true; long currentSize = this.size.get(); - long bytesToFree = currentSize - minSize(); + bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { LOG.trace("Block cache LRU eviction started; Attempting to free " + From 0e1bf76aa49c371d811f5d99c7877947addd9bb1 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 3 May 2020 18:34:34 +0300 Subject: [PATCH 011/769] Update TestLruBlockCache.java fix bug --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 8d24f0205a0a..50889a891921 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -344,7 +344,7 @@ public void testCacheEvictionThreePriorities() throws Exception { false, 16 * 1024 * 1024, 100, - 10 + 10, 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); @@ -469,7 +469,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { true, 16 * 1024 * 1024, 100, - 10 + 10, 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); @@ -579,7 +579,7 @@ public void testScanResistance() throws Exception { false, 16 * 1024 * 1024, 100, - 10 + 10, 10 * 1024 * 1024); @@ -647,7 +647,7 @@ public void testMaxBlockSize() throws Exception { false, 1024, 100, - 10 + 10, 10 * 1024 * 1024); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -690,7 +690,7 @@ public void testResizeBlockCache() throws Exception { false, 16 * 1024 * 1024, 100, - 10 + 10, 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); @@ -855,7 +855,7 @@ public void testCacheBlockNextBlockMetadataMissing() { false, 1024, 100, - 10 + 10, 10 * 1024 * 1024); BlockCacheKey key = new BlockCacheKey("key1", 0); @@ -1047,7 +1047,7 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 1.2f, // limit false, 1024, 100, - 10 + 10, 10 * 1024 * 1024); testMultiThreadGetAndEvictBlockInternal(cache); } From 1b25d67d87c6667b46d6b58dc6175b3ea21ab06c Mon Sep 17 00:00:00 2001 
From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 13:51:53 +0300 Subject: [PATCH 012/769] Update LruBlockCache.java fixed style --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index e273c5a6ec9a..d8d428605102 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -156,7 +156,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_CACHE_DATA_BLOCK_PERCENT = "hbase.lru.cache.data.block.percent"; private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 50; - private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = "hbase.lru.cache.heavy.eviction.count.limit"; private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 10; @@ -246,16 +246,16 @@ public class LruBlockCache implements FirstLevelBlockCache { /** Percent of cached data blocks */ private final int cacheDataBlockPercent; - + /** Counter to control of eviction process */ private static int heavyEvictionCount; /** Limit of count eviction process when start to avoid to cache blocks */ private final int heavyEvictionCountLimit; - + /** Limit of volume eviction process when start to avoid to cache blocks */ private static int heavyEvictionBytesSizeLimit; - + /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). 
@@ -304,8 +304,10 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -437,8 +439,8 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { - - // Don't cache this DATA block when too many blocks evicted + + // Don't cache this DATA block when too many blocks evict // and if we have limit on percent of blocks to cache // good for performance (HBASE-23887) if (heavyEvictionCount > heavyEvictionCountLimit) { @@ -707,7 +709,7 @@ long getOverhead() { long evict() { // Ensure only one eviction at a time - if(!evictionLock.tryLock()) return 0; + if (!evictionLock.tryLock()) {return 0}; long bytesToFree = 0L; try { @@ -721,7 +723,7 @@ long evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) return 0; + if (bytesToFree <= 0) {return 0}; // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); @@ -1002,10 +1004,12 @@ public void run() { if (cache == null) break; bytesFreed = cache.evict(); // Control of heavy cleaning BlockCache - if (bytesFreed > 0 && bytesFreed > heavyEvictionBytesSizeLimit) + if (bytesFreed > 0 && bytesFreed > heavyEvictionBytesSizeLimit) { heavyEvictionCount++; - else + } + else { heavyEvictionCount = 0; + } } } From 81dcaaabe3c7b854c38619f31d97f24573fc4c97 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 14:17:11 +0300 Subject: [PATCH 013/769] Update LruBlockCache.java fixed bug --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index d8d428605102..29c456806bd3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -709,7 +709,7 @@ long getOverhead() { long evict() { // Ensure only one eviction at a time - if (!evictionLock.tryLock()) {return 0}; + if (!evictionLock.tryLock()) {return 0;} long bytesToFree = 0L; try { @@ -723,7 +723,7 @@ long evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) {return 0}; + if (bytesToFree <= 0) {return 0;} // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); From ac72f829be0fafca86904b2cd9a2203cfc0b03fc Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 15:24:37 +0300 Subject: [PATCH 014/769] Update LruBlockCache.java fixed style --- 
.../apache/hadoop/hbase/io/hfile/LruBlockCache.java | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 29c456806bd3..1745ddb601ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -304,9 +304,9 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, + conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); } @@ -709,7 +709,10 @@ long getOverhead() { long evict() { // Ensure only one eviction at a time - if (!evictionLock.tryLock()) {return 0;} + if (!evictionLock.tryLock()) { + return 0; + } + long bytesToFree = 0L; try { @@ -723,7 +726,9 @@ long evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) {return 0;} + if (bytesToFree <= 0) { + return 0; + } // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); From 53319da2c1d1f39f1d83cc26228f439b98aaaf6e Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 19:04:53 +0300 Subject: [PATCH 015/769] Update TestLruBlockCache.java added unit test --- .../hbase/io/hfile/TestLruBlockCache.java | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 50889a891921..35ca376194a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1051,4 +1051,51 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 10 * 1024 * 1024); testMultiThreadGetAndEvictBlockInternal(cache); } + + public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exception { + long maxSize = 100000; + int numBlocks = 100; + final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); + assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + + final LruBlockCache cache = + new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + maxSize, + percentOfCachedBlocks, + 0, + 1); + + EvictionThread evictionThread = cache.getEvictionThread(); + assertTrue(evictionThread != null); + while (!evictionThread.isEnteringRun()) { + Thread.sleep(1); + } + + final String hfileName = "hfile"; + for (int blockIndex = 0; blockIndex <= numBlocks * 5; ++blockIndex) { + CachedItem block = new CachedItem(hfileName, (int) blockSize, 
blockIndex); + cache.cacheBlock(block.cacheKey, block, false); + Thread.sleep(1); + } + + // Check if all offset of cached blocks less + // It means some of blocka were not put into BlockCache + for (BlockCacheKey key : cache.getMapForTests().keySet()) + Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); + + } + + @Test + public void testSkipCacheDataBlocks() throws Exception { + for (int percentOfCachedBlocks = 25; percentOfCachedBlocks <= 100; percentOfCachedBlocks+=25) { + testSkipCacheDataBlocksInteral(percentOfCachedBlocks); + } + } } From c1a729fef6054cfc6e1400e21f6a0fd8eca1be36 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 19:06:18 +0300 Subject: [PATCH 016/769] Update TestLruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 35ca376194a7..a93e3e32c9b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1089,7 +1089,6 @@ public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exc // It means some of blocka were not put into BlockCache for (BlockCacheKey key : cache.getMapForTests().keySet()) Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); - } @Test From 5c2735b692c513129ea56529eabfc4974e70fdd2 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 20:11:36 +0300 Subject: [PATCH 017/769] Update TestLruBlockCache.java fix codestyle --- .../org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index a93e3e32c9b1..c1285bc35813 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1087,8 +1087,9 @@ public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exc // Check if all offset of cached blocks less // It means some of blocka were not put into BlockCache - for (BlockCacheKey key : cache.getMapForTests().keySet()) + for (BlockCacheKey key : cache.getMapForTests().keySet()) { Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); + } } @Test From 8755d0dd4111fc1e3f5a13f748949c19a371d9e4 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 20:12:22 +0300 Subject: [PATCH 018/769] Update LruBlockCache.java fix codestyle --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 1745ddb601ea..eb55e393e322 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -712,7 +712,7 @@ long evict() { if 
(!evictionLock.tryLock()) { return 0; } - + long bytesToFree = 0L; try { From ccbbb66672197e85f370760ad74beca7726ba805 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 21:28:05 +0300 Subject: [PATCH 019/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index eb55e393e322..6bad767a6a3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -251,7 +251,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private static int heavyEvictionCount; /** Limit of count eviction process when start to avoid to cache blocks */ - private final int heavyEvictionCountLimit; + private static int heavyEvictionCountLimit; /** Limit of volume eviction process when start to avoid to cache blocks */ private static int heavyEvictionBytesSizeLimit; From c9d4fb929d14164916c66d595734961f1d989146 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 5 May 2020 09:05:43 +0300 Subject: [PATCH 020/769] Update LruBlockCache.java refactoring --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 6bad767a6a3a..0c2ce6294c55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -248,13 +248,13 @@ public class LruBlockCache implements FirstLevelBlockCache { private final int cacheDataBlockPercent; /** Counter to control of eviction process */ - private static int heavyEvictionCount; + private volatile int heavyEvictionCount; /** Limit of count eviction process when start to avoid to cache blocks */ - private static int heavyEvictionCountLimit; + private final int heavyEvictionCountLimit; /** Limit of volume eviction process when start to avoid to cache blocks */ - private static int heavyEvictionBytesSizeLimit; + private final int heavyEvictionBytesSizeLimit; /** * Default constructor. Specify maximum size and expected average block @@ -376,6 +376,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, this.cacheDataBlockPercent = cacheDataBlockPercent < 0 ? 0 : cacheDataBlockPercent; this.heavyEvictionCountLimit = heavyEvictionCountLimit; this.heavyEvictionBytesSizeLimit = heavyEvictionBytesSizeLimit; + this.heavyEvictionCount = 0; // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. @@ -441,8 +442,8 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { // Don't cache this DATA block when too many blocks evict - // and if we have limit on percent of blocks to cache - // good for performance (HBASE-23887) + // and if we have limit on percent of blocks to cache. 
+ // It is good for performance (HBASE-23887) if (heavyEvictionCount > heavyEvictionCountLimit) { if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { @@ -995,7 +996,6 @@ public EvictionThread(LruBlockCache cache) { public void run() { enteringRun = true; long bytesFreed; - heavyEvictionCount = 0; while (this.go) { synchronized (this) { try { @@ -1008,12 +1008,14 @@ public void run() { LruBlockCache cache = this.cache.get(); if (cache == null) break; bytesFreed = cache.evict(); - // Control of heavy cleaning BlockCache - if (bytesFreed > 0 && bytesFreed > heavyEvictionBytesSizeLimit) { - heavyEvictionCount++; + // If heavy cleaning BlockCache control. + // It helps avoid put too many blocks into BlockCache + // when evict() works very active. + if (bytesFreed > 0 && bytesFreed > cache.heavyEvictionBytesSizeLimit) { + cache.heavyEvictionCount++; } else { - heavyEvictionCount = 0; + cache.heavyEvictionCount = 0; } } } @@ -1092,8 +1094,8 @@ public CacheStats getStats() { public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( (4 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + - (6 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) - + ClassSize.OBJECT); + (6 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + + (4 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); @Override public long heapSize() { From 8710ca50ed82dcc36fd6ae40d18db55df4dc15f8 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 5 May 2020 09:06:47 +0300 Subject: [PATCH 021/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 0c2ce6294c55..0c14efbdfbaa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -373,7 +373,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, // check the bounds cacheDataBlockPercent = cacheDataBlockPercent > 100 ? 100 : cacheDataBlockPercent; - this.cacheDataBlockPercent = cacheDataBlockPercent < 0 ? 0 : cacheDataBlockPercent; + this.cacheDataBlockPercent = cacheDataBlockPercent < 1 ? 
1 : cacheDataBlockPercent; this.heavyEvictionCountLimit = heavyEvictionCountLimit; this.heavyEvictionBytesSizeLimit = heavyEvictionBytesSizeLimit; this.heavyEvictionCount = 0; From 251ec6a139e87c4518ce3e9b27b668e463181069 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 5 May 2020 09:09:12 +0300 Subject: [PATCH 022/769] Update TestLruBlockCache.java added comments --- .../org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index c1285bc35813..188b54408c5e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1085,7 +1085,7 @@ public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exc Thread.sleep(1); } - // Check if all offset of cached blocks less + // Check if all offset (last two digits) of cached blocks less than the percent. // It means some of blocka were not put into BlockCache for (BlockCacheKey key : cache.getMapForTests().keySet()) { Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); From cabf45d026eb2e695c824c986cad8908256647d2 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 5 May 2020 11:09:51 +0300 Subject: [PATCH 023/769] Update TestLruBlockCache.java adjust tests to default params --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 188b54408c5e..f983d7c1f88e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -343,7 +343,7 @@ public void testCacheEvictionThreePriorities() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -468,7 +468,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 1.2f, // limit true, 16 * 1024 * 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -578,7 +578,7 @@ public void testScanResistance() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -646,7 +646,7 @@ public void testMaxBlockSize() throws Exception { 1.2f, // limit false, 1024, - 100, + 50, 10, 10 * 1024 * 1024); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); @@ -689,7 +689,7 @@ public void testResizeBlockCache() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -854,7 +854,7 @@ public void testCacheBlockNextBlockMetadataMissing() { 1.2f, // limit false, 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -1046,7 +1046,7 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.34f, // memory 1.2f, // limit false, 1024, - 100, + 50, 10, 10 * 1024 * 1024); testMultiThreadGetAndEvictBlockInternal(cache); From a4ebde3c43e229c57c908c8f69c5faccb2b51fb5 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 11:56:29 +0300 Subject: [PATCH 024/769] Update LruBlockCache.java 
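This change turns the fixed hbase.lru.cache.data.block.percent knob into a feedback loop: the eviction thread sums the MB freed over roughly 10-second windows, compares the sum against hbase.lru.cache.heavy.eviction.mb.size.limit, and once more than hbase.lru.cache.heavy.eviction.count.limit consecutive windows are heavy it lowers the share of data blocks admitted to the cache. A hedged sketch of the adjustment arithmetic (a standalone method written for illustration only; the real logic lives inline in EvictionThread.run() in the diff below, and the parameter names mirror the patch's fields):

    // Sketch only: mirrors the step computation this patch adds to EvictionThread.run().
    // freedMb = MB evicted in the last ~10s window; limitMb = heavy.eviction.mb.size.limit.
    static int nextCacheDataBlockPercent(long freedMb, long limitMb,
        double overheadCoefficient, int currentPercent) {
      int overheadPercent = (int) (freedMb * 100 / limitMb) - 100; // 3000 MB vs 500 MB -> 500
      int step = (int) (overheadPercent * overheadCoefficient);    // 500 * 0.01 -> 5
      step = Math.min(15, Math.max(0, step));                      // each step is capped at 15
      return Math.max(1, currentPercent - step);                   // and never goes below 1%
    }

With the defaults (limit 500 MB, coefficient 0.01), a window that frees 3000 MB lowers the caching percent from 100 to 95; when eviction pressure subsides, the percent climbs back toward 100. In short: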
Implemented new auto-scale eviction rate logic --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 106 ++++++++++++------ 1 file changed, 73 insertions(+), 33 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 0c14efbdfbaa..cf6cb5a50ef5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -153,16 +153,17 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; - private static final String LRU_CACHE_DATA_BLOCK_PERCENT = "hbase.lru.cache.data.block.percent"; - private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 50; - private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = "hbase.lru.cache.heavy.eviction.count.limit"; private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 10; - private static final String LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT - = "hbase.lru.cache.heavy.eviction.bytes.size.limit"; - private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT = 10 * 1024 * 1024; + private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT + = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; + + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT + = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + private static final double DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in @@ -245,16 +246,16 @@ public class LruBlockCache implements FirstLevelBlockCache { private transient BlockCache victimHandler = null; /** Percent of cached data blocks */ - private final int cacheDataBlockPercent; - - /** Counter to control of eviction process */ - private volatile int heavyEvictionCount; + private volatile int cacheDataBlockPercent; /** Limit of count eviction process when start to avoid to cache blocks */ private final int heavyEvictionCountLimit; /** Limit of volume eviction process when start to avoid to cache blocks */ - private final int heavyEvictionBytesSizeLimit; + private final int heavyEvictionMbSizeLimit; + + /** Adjust auto-scaling via overhead of evition rate */ + private final double heavyEvictionOverheadCoefficient; /** * Default constructor. 
Specify maximum size and expected average block @@ -284,9 +285,9 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE, - DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT); + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { @@ -303,11 +304,12 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), - conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT), conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); + conf.getInt(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), + conf.getDouble(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -334,7 +336,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent, - int heavyEvictionCountLimit, int heavyEvictionBytesSizeLimit) { + int heavyEvictionCountLimit, int heavyEvictionMbSizeLimit, + double heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { @@ -372,11 +375,14 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, } // check the bounds - cacheDataBlockPercent = cacheDataBlockPercent > 100 ? 100 : cacheDataBlockPercent; - this.cacheDataBlockPercent = cacheDataBlockPercent < 1 ? 1 : cacheDataBlockPercent; - this.heavyEvictionCountLimit = heavyEvictionCountLimit; - this.heavyEvictionBytesSizeLimit = heavyEvictionBytesSizeLimit; - this.heavyEvictionCount = 0; + this.heavyEvictionCountLimit = heavyEvictionCountLimit < 0 ? 0 : heavyEvictionCountLimit; + this.heavyEvictionMbSizeLimit = heavyEvictionMbSizeLimit < 1 ? 1 : heavyEvictionMbSizeLimit; + this.cacheDataBlockPercent = 100; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 1 + ? 1 : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001 + ? 0.001 : heavyEvictionOverheadCoefficient; + this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. 
@@ -399,6 +405,11 @@ public void setMaxSize(long maxSize) { runEviction(); } } + + @VisibleForTesting + public int getCacheDataBlockPercent() { + return cacheDataBlockPercent; + } /** * The block cached in LRUBlockCache will always be an heap block: on the one side, the heap @@ -444,11 +455,9 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) // Don't cache this DATA block when too many blocks evict // and if we have limit on percent of blocks to cache. // It is good for performance (HBASE-23887) - if (heavyEvictionCount > heavyEvictionCountLimit) { - if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { - if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { - return; - } + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; } } @@ -994,8 +1003,11 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { - enteringRun = true; long bytesFreed; + long mbFreedSum = 0; + int heavyEvictionCount = 0; + int freedDataOverheadPercent = 0; + long startTime = System.currentTimeMillis(); while (this.go) { synchronized (this) { try { @@ -1008,14 +1020,42 @@ public void run() { LruBlockCache cache = this.cache.get(); if (cache == null) break; bytesFreed = cache.evict(); + long stopTime = System.currentTimeMillis(); // If heavy cleaning BlockCache control. // It helps avoid put too many blocks into BlockCache // when evict() works very active. - if (bytesFreed > 0 && bytesFreed > cache.heavyEvictionBytesSizeLimit) { - cache.heavyEvictionCount++; - } - else { - cache.heavyEvictionCount = 0; + if (stopTime - startTime <= 1000 * 10 - 1) { + mbFreedSum += bytesFreed/1024/1024; + } else { + freedDataOverheadPercent = (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; + if (mbFreedSum > cache.heavyEvictionMbSizeLimit) { + heavyEvictionCount++; + if (heavyEvictionCount > cache.heavyEvictionCountLimit) { + int ch = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + ch = ch > 15 ? 15 : ch; + ch = ch < 0 ? 0 : ch; + cache.cacheDataBlockPercent -= ch; + cache.cacheDataBlockPercent = cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; + } + } else { + if (mbFreedSum >= cache.heavyEvictionMbSizeLimit * 0.1) { + // It help avoid exit during short-term fluctuation + int ch = (int) (-freedDataOverheadPercent * 0.1 + 1); + cache.cacheDataBlockPercent += ch; + cache.cacheDataBlockPercent = cache.cacheDataBlockPercent > 100 ? 
100 : cache.cacheDataBlockPercent; + } else { + heavyEvictionCount = 0; + cache.cacheDataBlockPercent = 100; + } + } + LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + + "heavy eviction counter: {}, " + + "current caching DataBlock (%): {}", + mbFreedSum, freedDataOverheadPercent, + heavyEvictionCount, cache.cacheDataBlockPercent); + + mbFreedSum = 0; + startTime = stopTime; } } } From 44b2ff05f9e9f5b63e1d1157725a74a0020385bb Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 11:56:37 +0300 Subject: [PATCH 025/769] Update TestLruBlockCache.java new test --- .../hbase/io/hfile/TestLruBlockCache.java | 93 +++++++++++-------- 1 file changed, 53 insertions(+), 40 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index f983d7c1f88e..9523fe5622c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -343,9 +343,9 @@ public void testCacheEvictionThreePriorities() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -468,9 +468,9 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 1.2f, // limit true, 16 * 1024 * 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -578,10 +578,9 @@ public void testScanResistance() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 50, 10, - 10 * 1024 * 1024); - + 500, + 0.01); CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -646,9 +645,10 @@ public void testMaxBlockSize() throws Exception { 1.2f, // limit false, 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); + CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -689,9 +689,9 @@ public void testResizeBlockCache() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -854,9 +854,9 @@ public void testCacheBlockNextBlockMetadataMissing() { 1.2f, // limit false, 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -1046,31 +1046,31 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.34f, // memory 1.2f, // limit false, 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); testMultiThreadGetAndEvictBlockInternal(cache); } - - public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exception { - long maxSize = 100000; - int numBlocks = 100; + + public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { + long maxSize = 100000000; + int numBlocks = 100000; final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); 
assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); final LruBlockCache cache = - new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - maxSize, - percentOfCachedBlocks, - 0, - 1); + new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + maxSize, + heavyEvictionCountLimit, + 500, + 0.01); EvictionThread evictionThread = cache.getEvictionThread(); assertTrue(evictionThread != null); @@ -1079,23 +1079,36 @@ public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exc } final String hfileName = "hfile"; - for (int blockIndex = 0; blockIndex <= numBlocks * 5; ++blockIndex) { + for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); cache.cacheBlock(block.cacheKey, block, false); - Thread.sleep(1); } - // Check if all offset (last two digits) of cached blocks less than the percent. - // It means some of blocka were not put into BlockCache - for (BlockCacheKey key : cache.getMapForTests().keySet()) { - Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); + evictionThread.evict(); + Thread.sleep(100); + + if (heavyEvictionCountLimit == 0) { + // Check if all offset (last two digits) of cached blocks less than the percent. + // It means some of blocks haven't not put into BlockCache + assertTrue(cache.getCacheDataBlockPercent() < 90); + for (BlockCacheKey key : cache.getMapForTests().keySet()) { + assertTrue(!(key.getOffset() % 100 > 90)); + } + } else { + assertTrue(cache.getCacheDataBlockPercent() == 100); + int counter = 0; + for (BlockCacheKey key : cache.getMapForTests().keySet()) { + if (key.getOffset() % 100 > 90) + counter++; + } + assertTrue(counter > 1000); } } @Test public void testSkipCacheDataBlocks() throws Exception { - for (int percentOfCachedBlocks = 25; percentOfCachedBlocks <= 100; percentOfCachedBlocks+=25) { - testSkipCacheDataBlocksInteral(percentOfCachedBlocks); - } + testSkipCacheDataBlocksInteral(0); + testSkipCacheDataBlocksInteral(100); } + } From b98324510cdee4440860edef698b0b3855bbaae4 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 12:35:40 +0300 Subject: [PATCH 026/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index cf6cb5a50ef5..162a0c9424c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -306,7 +306,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + 
conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), conf.getDouble(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); From e3aa502bf6c6ea25d76ed647f764dfc04ffdc2f2 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 12:35:45 +0300 Subject: [PATCH 027/769] Update TestLruBlockCache.java --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 9523fe5622c5..a10799717831 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1069,7 +1069,7 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E false, maxSize, heavyEvictionCountLimit, - 500, + 200, 0.01); EvictionThread evictionThread = cache.getEvictionThread(); @@ -1082,6 +1082,8 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); cache.cacheBlock(block.cacheKey, block, false); + if (cache.getCacheDataBlockPercent() < 70) // enough for test + break; } evictionThread.evict(); @@ -1089,12 +1091,13 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E if (heavyEvictionCountLimit == 0) { // Check if all offset (last two digits) of cached blocks less than the percent. 
- // It means some of blocks haven't not put into BlockCache + // It means some of the blocks haven't been put into BlockCache assertTrue(cache.getCacheDataBlockPercent() < 90); for (BlockCacheKey key : cache.getMapForTests().keySet()) { assertTrue(!(key.getOffset() % 100 > 90)); } } else { + // Check that auto-scaling is not working (all blocks in BlockCache) assertTrue(cache.getCacheDataBlockPercent() == 100); int counter = 0; for (BlockCacheKey key : cache.getMapForTests().keySet()) { @@ -1103,12 +1106,16 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E } assertTrue(counter > 1000); } + evictionThread.shutdown(); } @Test public void testSkipCacheDataBlocks() throws Exception { + // Check that auto-scaling will work right after start testSkipCacheDataBlocksInteral(0); + // Check that auto-scaling will not work right after start + // (the run has to finish before auto-scaling kicks in) testSkipCacheDataBlocksInteral(100); } - + }
From 0ea3b01a66f54047cbab6c6f699c3948cb5811f7 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 13:10:28 +0300 Subject: [PATCH 028/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 162a0c9424c9..8ae79ee68613 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -252,7 +252,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private final int heavyEvictionCountLimit; /** Eviction-volume limit past which we start to avoid caching blocks */ - private final int heavyEvictionMbSizeLimit; + private final long heavyEvictionMbSizeLimit; /** Adjust auto-scaling via overhead of eviction rate */ private final double heavyEvictionOverheadCoefficient; @@ -336,7 +336,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent, - int heavyEvictionCountLimit, int heavyEvictionMbSizeLimit, + int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, double heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 ||
From 20509cd17f7c47162994825f636a68bcc79200aa Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 14:09:24 +0300 Subject: [PATCH 029/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 8ae79ee68613..5f8fc6c21519 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -335,7 +335,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent, + boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, double heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize;
From 3c7c6bff90e070b5eee5515163dcce225d5e704e Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 15:49:19 +0300 Subject: [PATCH 030/769] Update LruBlockCache.java --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 5f8fc6c21519..6c5197670504 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -163,7 +163,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; - private static final double DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01; + private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in @@ -255,7 +255,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private final long heavyEvictionMbSizeLimit; /** Adjust auto-scaling via overhead of eviction rate */ - private final double heavyEvictionOverheadCoefficient; + private final float heavyEvictionOverheadCoefficient; /** * Default constructor.
Specify maximum size and expected average block @@ -308,7 +308,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), - conf.getDouble(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, + conf.getFloat(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); } @@ -337,7 +337,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, float multiFactor, float memoryFactor, float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, - double heavyEvictionOverheadCoefficient) { + float heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { @@ -1133,9 +1133,9 @@ public CacheStats getStats() { } public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( - (4 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + - (6 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + - (4 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); + (5 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + + (7 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + + (1 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); @Override public long heapSize() { From d10daeb262f9e48dc550e8ae7eac9a75f0e23331 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 16:35:33 +0300 Subject: [PATCH 031/769] Update LruBlockCache.java Fixed errors --- .../apache/hadoop/hbase/io/hfile/LruBlockCache.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 6c5197670504..f5050da8b71f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -378,10 +378,10 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, this.heavyEvictionCountLimit = heavyEvictionCountLimit < 0 ? 0 : heavyEvictionCountLimit; this.heavyEvictionMbSizeLimit = heavyEvictionMbSizeLimit < 1 ? 1 : heavyEvictionMbSizeLimit; this.cacheDataBlockPercent = 100; - heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 1 - ? 1 : heavyEvictionOverheadCoefficient; - heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001 - ? 0.001 : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 0.1f + ? 1f : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001f + ? 0.001f : heavyEvictionOverheadCoefficient; this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; // TODO: Add means of turning this off. 
Bit obnoxious running thread just to make a log @@ -1135,7 +1135,7 @@ public CacheStats getStats() { public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( (5 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + (7 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + - (1 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); + (2 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); @Override public long heapSize() { From 952e335d6c67af4f32e6efd361d5f76107364fff Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 18:08:17 +0300 Subject: [PATCH 032/769] Update TestLruBlockCache.java Fix --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index a10799717831..07b527f9e4bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -345,7 +345,7 @@ public void testCacheEvictionThreePriorities() throws Exception { 16 * 1024 * 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -470,7 +470,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 16 * 1024 * 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -580,7 +580,7 @@ public void testScanResistance() throws Exception { 16 * 1024 * 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -647,7 +647,7 @@ public void testMaxBlockSize() throws Exception { 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -691,7 +691,7 @@ public void testResizeBlockCache() throws Exception { 16 * 1024 * 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -856,7 +856,7 @@ public void testCacheBlockNextBlockMetadataMissing() { 1024, 10, 500, - 0.01); + 0.01f); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -1048,7 +1048,7 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { false, 1024, 10, 500, - 0.01); + 0.01f); testMultiThreadGetAndEvictBlockInternal(cache); } @@ -1070,7 +1070,7 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E maxSize, heavyEvictionCountLimit, 200, - 0.01); + 0.01f); EvictionThread evictionThread = cache.getEvictionThread(); assertTrue(evictionThread != null); From 13bb905ad78f0aa2b6aa94796b78f557832277d0 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 21:00:29 +0300 Subject: [PATCH 033/769] Update LruBlockCache.java fixed codestyle --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index f5050da8b71f..e84a87368f40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -160,7 +160,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = "hbase.lru.cache.heavy.eviction.mb.size.limit"; private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; - + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; @@ -335,7 +335,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, + boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, float heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; @@ -405,7 +405,7 @@ public void setMaxSize(long maxSize) { runEviction(); } } - + @VisibleForTesting public int getCacheDataBlockPercent() { return cacheDataBlockPercent; @@ -1027,7 +1027,8 @@ public void run() { if (stopTime - startTime <= 1000 * 10 - 1) { mbFreedSum += bytesFreed/1024/1024; } else { - freedDataOverheadPercent = (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; + freedDataOverheadPercent = + (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; if (mbFreedSum > cache.heavyEvictionMbSizeLimit) { heavyEvictionCount++; if (heavyEvictionCount > cache.heavyEvictionCountLimit) { @@ -1035,14 +1036,16 @@ public void run() { ch = ch > 15 ? 15 : ch; ch = ch < 0 ? 0 : ch; cache.cacheDataBlockPercent -= ch; - cache.cacheDataBlockPercent = cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; + cache.cacheDataBlockPercent = + cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; } } else { if (mbFreedSum >= cache.heavyEvictionMbSizeLimit * 0.1) { // It help avoid exit during short-term fluctuation int ch = (int) (-freedDataOverheadPercent * 0.1 + 1); cache.cacheDataBlockPercent += ch; - cache.cacheDataBlockPercent = cache.cacheDataBlockPercent > 100 ? 100 : cache.cacheDataBlockPercent; + cache.cacheDataBlockPercent = + cache.cacheDataBlockPercent > 100 ? 
100 : cache.cacheDataBlockPercent; } else { heavyEvictionCount = 0; cache.cacheDataBlockPercent = 100; From 4e969c13fc140e5b64c0a5645b8652bdd66cb371 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 21:00:31 +0300 Subject: [PATCH 034/769] Update TestLruBlockCache.java fixed codestyle --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 07b527f9e4bd..ee3593ec36f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1060,7 +1060,8 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E final LruBlockCache cache = new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.5f, // min 0.99f, // acceptable 0.33f, // single 0.33f, // multi @@ -1082,8 +1083,10 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); cache.cacheBlock(block.cacheKey, block, false); - if (cache.getCacheDataBlockPercent() < 70) // enough for test + if (cache.getCacheDataBlockPercent() < 70) { + // enough for test break; + } } evictionThread.evict(); @@ -1101,8 +1104,9 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E assertTrue(cache.getCacheDataBlockPercent() == 100); int counter = 0; for (BlockCacheKey key : cache.getMapForTests().keySet()) { - if (key.getOffset() % 100 > 90) + if (key.getOffset() % 100 > 90) { counter++; + } } assertTrue(counter > 1000); } From 86fd011adee58c7c2190eafdce83a9d3719480e9 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 7 Jun 2020 08:21:54 +0300 Subject: [PATCH 035/769] Update TestLruBlockCache.java --- .../org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index ee3593ec36f1..6f572bd1bff3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1051,7 +1051,7 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.01f); testMultiThreadGetAndEvictBlockInternal(cache); } - + public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { long maxSize = 100000000; int numBlocks = 100000; @@ -1060,7 +1060,7 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E final LruBlockCache cache = new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 
0.5f, // min 0.99f, // acceptable 0.33f, // single
From 3a6d57868a2ea9e1337249dd4b6d464910c63779 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 8 Jun 2020 11:07:56 +0300 Subject: [PATCH 036/769] Update LruBlockCache.java Fixed remarks --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 85 ++++++++++++++++--- 1 file changed, 72 insertions(+), 13 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index e84a87368f40..7c36a64cf519 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -155,14 +155,18 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = "hbase.lru.cache.heavy.eviction.count.limit"; - private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 10; + // The default value effectively disables this performance feature. + // Because 2147483647 is about ~680 years (after that it will start to work) + // We can set it to 0-10 and get the benefit right away. + // (see details https://issues.apache.org/jira/browse/HBASE-23887). + private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 2147483647; private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT - = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + = "hbase.lru.cache.heavy.eviction.mb.size.limit"; private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT - = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; /** @@ -452,10 +456,13 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { - // Don't cache this DATA block when too many blocks evict - // and if we have limit on percent of blocks to cache. - // It is good for performance (HBASE-23887) + // Some data blocks will not be put into BlockCache when the eviction rate is too high. + // It is good for performance + // (see details: https://issues.apache.org/jira/browse/HBASE-23887) + // How it is calculated can be found inside the EvictionThread class. if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + // It works like a filter - blocks whose last two digits of offset are + // greater than what we calculate in the Eviction Thread will not be put into BlockCache if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { return; @@ -715,6 +722,11 @@ long getOverhead() { /** * Eviction method. + * + * Evict items in order of use, allowing deletion of items + * which haven't been used for the longest amount of time. + * + * @return how many bytes were freed */ long evict() { @@ -837,7 +849,7 @@ long evict() { stats.evict(); evictionInProgress = false; evictionLock.unlock(); - return bytesToFree; + return bytesFreed; } } @@ -1020,33 +1032,80 @@ public void run() { LruBlockCache cache = this.cache.get(); if (cache == null) break; bytesFreed = cache.evict(); + /* + * Sometimes we are reading more data than can fit into BlockCache + * and that is the cause of a high rate of evictions. + * This in turn leads to heavy Garbage Collector work. + * So a lot of blocks put into BlockCache but never read, + * but spending a lot of CPU resources. + * Here we will analyze how many bytes were freed and decide + * whether the time has come to reduce the amount of cached blocks. + * It helps avoid putting too many blocks into BlockCache + * when evict() works very actively and saves CPU for other jobs. + * More details: https://issues.apache.org/jira/browse/HBASE-23887 + */ + + // First of all we have to control how much time + // has passed since previous evict() was launched + // This should be almost the same time (+/- 10s) + // because we get comparable volumes of freed bytes each time. + // 10s because this is default period to run evict() (see above this.wait) long stopTime = System.currentTimeMillis(); - // If heavy cleaning BlockCache control. - // It helps avoid put too many blocks into BlockCache - // when evict() works very active. if (stopTime - startTime <= 1000 * 10 - 1) { mbFreedSum += bytesFreed/1024/1024; } else { + // Here we have to figure out what situation we have got. + // We have the limit "hbase.lru.cache.heavy.eviction.mb.size.limit" + // and can calculate overhead on it. + // We will use this information to decide, + // how to change percent of caching blocks. freedDataOverheadPercent = (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; - if (mbFreedSum > cache.heavyEvictionMbSizeLimit) { + if (freedDataOverheadPercent > 100) { + // Now we are in the situation when we are above the limit + // But maybe we are going to ignore it because it will end quite soon heavyEvictionCount++; if (heavyEvictionCount > cache.heavyEvictionCountLimit) { + // It has been going on for a long time and we have to reduce the caching of + // blocks now. So we calculate here how many blocks we want to skip. + // It depends on: + // 1. Overhead - if overhead is big we could be more aggressive + // in reducing the amount of cached blocks. + // 2. How fast we want to get the result. If we know that our + // heavy reading will last a long time, we don't want to wait and can + // increase the coefficient and get good performance quite soon. + // But if we are not sure, we can do it slowly and it could prevent + // premature exit from this mode. So, when the coefficient is + // higher we can get better performance when heavy reading is stable. + // But when reading is changing we can adjust to it and set + // the coefficient to a lower value. int ch = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + // But practice shows that a 15% reduction is quite enough. + // We are not greedy (it could lead to premature exit). ch = ch > 15 ? 15 : ch; - ch = ch < 0 ? 0 : ch; + ch = ch < 0 ? 0 : ch; // I think it will never happen but check for sure + // So this is the key point, here we are reducing % of caching blocks cache.cacheDataBlockPercent -= ch; + // If we go down too deep we have to stop here; 1% should remain anyway. cache.cacheDataBlockPercent = cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; } } else { + // Well, we have got overshooting. + // Maybe it is just a short-term fluctuation and we can stay in this mode. + // It helps avoid premature exit during short-term fluctuation. + // If overshooting is less than 90%, we will try to increase the percent of + // caching blocks and hope it is enough. if (mbFreedSum >= cache.heavyEvictionMbSizeLimit * 0.1) { - // It help avoid exit during short-term fluctuation + // Simple logic: more overshooting - more caching blocks (backpressure) int ch = (int) (-freedDataOverheadPercent * 0.1 + 1); cache.cacheDataBlockPercent += ch; + // But it can't be more than 100%, so check it. cache.cacheDataBlockPercent = cache.cacheDataBlockPercent > 100 ? 100 : cache.cacheDataBlockPercent; } else { + // Looks like heavy reading is over. + // Just exit from this mode. heavyEvictionCount = 0; cache.cacheDataBlockPercent = 100; }
From 3baf61e60e62732053c0ee48b0f112ac20d690ae Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 8 Jun 2020 11:09:17 +0300 Subject: [PATCH 037/769] Update TestLruBlockCache.java Fixed indents --- .../hbase/io/hfile/TestLruBlockCache.java | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 6f572bd1bff3..b30575ebc55c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1059,19 +1059,19 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); final LruBlockCache cache = - new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.5f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - maxSize, - heavyEvictionCountLimit, - 200, - 0.01f); + new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.5f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + maxSize, + heavyEvictionCountLimit, + 200, + 0.01f); EvictionThread evictionThread = cache.getEvictionThread(); assertTrue(evictionThread != null);
From 17e5d2858ed5df252b5eb389e6435093ef459515 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 8 Jun 2020 12:49:03 +0300 Subject: [PATCH 038/769] Update LruBlockCache.java fixed bug --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 7c36a64cf519..1e6ad3785110 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -849,7 +849,7 @@ long evict() { stats.evict(); evictionInProgress = false; evictionLock.unlock(); - return bytesFreed; + return bytesToFree; } }
From e5b88141fa86721abfcfb3fc4f476fce93f2f7aa Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 8 Jun 2020 21:02:02 +0300 Subject: [PATCH 039/769] Update LruBlockCache.java Fixed bug --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 1e6ad3785110..04f12c80d21a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1061,7 +1061,7 @@ public void run() { // how to change percent of caching blocks. freedDataOverheadPercent = (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; - if (freedDataOverheadPercent > 100) { + if (freedDataOverheadPercent > 0) { // Now we are in the situation when we are above the limit // But maybe we are going to ignore it because it will end quite soon heavyEvictionCount++;
From abcbea37d63dbcd2340eb7eb0aeca19d7f6631a8 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 9 Jun 2020 13:56:11 +0300 Subject: [PATCH 040/769] Update LruBlockCache.java corrected by review --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 42 +++++++++---------- 1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 04f12c80d21a..cb38e4f9339a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -159,7 +159,7 @@ public class LruBlockCache implements FirstLevelBlockCache { // Because 2147483647 is about ~680 years (after that it will start to work) // We can set it to 0-10 and get the benefit right away. // (see details https://issues.apache.org/jira/browse/HBASE-23887). - private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 2147483647; + private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; @@ -1015,8 +1015,7 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { - long bytesFreed; - long mbFreedSum = 0; + long freedSumMb = 0; int heavyEvictionCount = 0; int freedDataOverheadPercent = 0; long startTime = System.currentTimeMillis(); @@ -1031,7 +1030,7 @@ public void run() { } LruBlockCache cache = this.cache.get(); if (cache == null) break; - bytesFreed = cache.evict(); + freedSumMb += cache.evict()/1024/1024; /* * Sometimes we are reading more data than can fit into BlockCache * and that is the cause of a high rate of evictions. @@ -1051,16 +1050,14 @@ public void run() { // because we get comparable volumes of freed bytes each time. // 10s because this is default period to run evict() (see above this.wait) long stopTime = System.currentTimeMillis(); - if (stopTime - startTime <= 1000 * 10 - 1) { - mbFreedSum += bytesFreed/1024/1024; - } else { + if ((stopTime - startTime) > 1000 * 10 - 1) { // Here we have to figure out what situation we have got. // We have the limit "hbase.lru.cache.heavy.eviction.mb.size.limit" // and can calculate overhead on it. // We will use this information to decide, // how to change percent of caching blocks. freedDataOverheadPercent = - (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; + (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; if (freedDataOverheadPercent > 0) { // Now we are in the situation when we are above the limit // But maybe we are going to ignore it because it will end quite soon @@ -1079,16 +1076,15 @@ public void run() { // higher we can get better performance when heavy reading is stable. // But when reading is changing we can adjust to it and set // the coefficient to a lower value. - int ch = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + int change = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); // But practice shows that a 15% reduction is quite enough. // We are not greedy (it could lead to premature exit). - ch = ch > 15 ? 15 : ch; - ch = ch < 0 ? 0 : ch; // I think it will never happen but check for sure + change = Math.min(15, change); + change = Math.max(0, change); // I think it will never happen but check for sure // So this is the key point, here we are reducing % of caching blocks - cache.cacheDataBlockPercent -= ch; + cache.cacheDataBlockPercent -= change; // If we go down too deep we have to stop here; 1% should remain anyway. - cache.cacheDataBlockPercent = - cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; + cache.cacheDataBlockPercent = Math.max(1, cache.cacheDataBlockPercent); } } else { // Well, we have got overshooting. // Maybe it is just a short-term fluctuation and we can stay in this mode. // It helps avoid premature exit during short-term fluctuation. // If overshooting is less than 90%, we will try to increase the percent of // caching blocks and hope it is enough. - if (mbFreedSum >= cache.heavyEvictionMbSizeLimit * 0.1) { + if (freedSumMb >= cache.heavyEvictionMbSizeLimit * 0.1) { // Simple logic: more overshooting - more caching blocks (backpressure) - int ch = (int) (-freedDataOverheadPercent * 0.1 + 1); - cache.cacheDataBlockPercent += ch; + int change = (int) (-freedDataOverheadPercent * 0.1 + 1); + cache.cacheDataBlockPercent += change; // But it can't be more than 100%, so check it. cache.cacheDataBlockPercent = - cache.cacheDataBlockPercent > 100 ? 100 : cache.cacheDataBlockPercent; + cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); } else { // Looks like heavy reading is over. // Just exit from this mode. @@ -1111,12 +1107,12 @@ public void run() { LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + - "heavy eviction counter: {}, " + - "current caching DataBlock (%): {}", - mbFreedSum, freedDataOverheadPercent, - heavyEvictionCount, cache.cacheDataBlockPercent); + "heavy eviction counter: {}, " + + "current caching DataBlock (%): {}", + freedSumMb, freedDataOverheadPercent, + heavyEvictionCount, cache.cacheDataBlockPercent); - mbFreedSum = 0; + freedSumMb = 0; startTime = stopTime; } }
From dc6b1fc1c19b5cee4b999e10104a2da45e405e66 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 9 Jun 2020 18:44:12 +0300 Subject: [PATCH 041/769] Update LruBlockCache.java fixed codestyle --- .../org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index cb38e4f9339a..ea4399693b84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1076,7 +1076,8 @@ public void run() { // higher we can get better performance when heavy reading is stable. // But when reading is changing we can adjust to it and set // the coefficient to a lower value. - int change = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + int change = + (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); // But practice shows that a 15% reduction is quite enough. // We are not greedy (it could lead to premature exit). change = Math.min(15, change); @@ -1097,8 +1098,7 @@ public void run() { int change = (int) (-freedDataOverheadPercent * 0.1 + 1); cache.cacheDataBlockPercent += change; // But it can't be more than 100%, so check it. - cache.cacheDataBlockPercent = - cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); + cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); } else { // Looks like heavy reading is over. // Just exit from this mode.
From bc025badbccf7043357a8c52713fb43125388b68 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 9 Jun 2020 18:47:36 +0300 Subject: [PATCH 042/769] Update LruBlockCache.java removed whitespaces --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index ea4399693b84..28c1d2416eee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -461,7 +461,7 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) // (see details: https://issues.apache.org/jira/browse/HBASE-23887) // How it is calculated can be found inside the EvictionThread class. if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { - // It works like a filter - blocks whose last two digits of offset are + // It works like a filter - blocks whose last two digits of offset are // greater than what we calculate in the Eviction Thread will not be put into BlockCache if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { return; @@ -1031,7 +1031,7 @@ public void run() { LruBlockCache cache = this.cache.get(); if (cache == null) break; freedSumMb += cache.evict()/1024/1024; - /* + /* * Sometimes we are reading more data than can fit into BlockCache * and that is the cause of a high rate of evictions. * This in turn leads to heavy Garbage Collector work. @@ -1043,8 +1043,8 @@ public void run() { * when evict() works very actively and saves CPU for other jobs. * More details: https://issues.apache.org/jira/browse/HBASE-23887 */ - - // First of all we have to control how much time + + // First of all we have to control how much time // has passed since previous evict() was launched // This should be almost the same time (+/- 10s) // because we get comparable volumes of freed bytes each time. @@ -1068,13 +1068,13 @@ public void run() { // It depends on: // 1. Overhead - if overhead is big we could be more aggressive // in reducing the amount of cached blocks. - // 2. How fast we want to get the result. If we know that our - // heavy reading will last a long time, we don't want to wait and can + // 2. How fast we want to get the result. If we know that our + // heavy reading will last a long time, we don't want to wait and can // increase the coefficient and get good performance quite soon. - // But if we are not sure, we can do it slowly and it could prevent - // premature exit from this mode. So, when the coefficient is + // But if we are not sure, we can do it slowly and it could prevent + // premature exit from this mode. So, when the coefficient is // higher we can get better performance when heavy reading is stable. - // But when reading is changing we can adjust to it and set + // But when reading is changing we can adjust to it and set // the coefficient to a lower value. @@ -1088,10 +1088,10 @@ public void run() { } else { - // Well, we have got overshooting. + // Well, we have got overshooting. // Maybe it is just a short-term fluctuation and we can stay in this mode. // It helps avoid premature exit during short-term fluctuation. - // If overshooting is less than 90%, we will try to increase the percent of + // If overshooting is less than 90%, we will try to increase the percent of // caching blocks and hope it is enough. if (freedSumMb >= cache.heavyEvictionMbSizeLimit * 0.1) { // Simple logic: more overshooting - more caching blocks (backpressure) int change = (int) (-freedDataOverheadPercent * 0.1 + 1); cache.cacheDataBlockPercent += change; // But it can't be more than 100%, so check it. cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); } else { - // Looks like heavy reading is over. + // Looks like heavy reading is over. // Just exit from this mode. heavyEvictionCount = 0; cache.cacheDataBlockPercent = 100;
From 6620759094ceeddd805bbe7426a1f6e625eee348 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Wed, 10 Jun 2020 00:03:49 +0300 Subject: [PATCH 043/769] Update LruBlockCache.java fixed whitespaces --- .../org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 28c1d2416eee..e2e37f205ec5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1054,7 +1054,7 @@ public void run() { // Here we have to figure out what situation we have got. // We have the limit "hbase.lru.cache.heavy.eviction.mb.size.limit" // and can calculate overhead on it. - // We will use this information to decide, + // We will use this information to decide, // how to change percent of caching blocks. freedDataOverheadPercent = @@ -1065,7 +1065,7 @@ public void run() { if (heavyEvictionCount > cache.heavyEvictionCountLimit) { // It has been going on for a long time and we have to reduce the caching of // blocks now. So we calculate here how many blocks we want to skip. - // It depends on: + // It depends on: // 1. Overhead - if overhead is big we could be more aggressive // in reducing the amount of cached blocks. // 2. How fast we want to get the result. If we know that our @@ -1076,7 +1076,7 @@ public void run() { // higher we can get better performance when heavy reading is stable. // But when reading is changing we can adjust to it and set // the coefficient to a lower value. - int change = + int change = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); // But practice shows that a 15% reduction is quite enough. // We are not greedy (it could lead to premature exit).
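Taken together, patches 036-043 leave the EvictionThread with a compact control loop: accumulate the megabytes freed by evict() over a roughly 10 second window, compare the total against hbase.lru.cache.heavy.eviction.mb.size.limit, and nudge cacheDataBlockPercent down or up accordingly. The following standalone Java sketch condenses that logic as of this point in the series; it is a simplified reading of the diffs above, not the actual HBase source (field and parameter names mirror the patches, and the surrounding thread loop, locking and logging are omitted). Note that patch 045 further below replaces the overhead test with the plainer freedSumMb > heavyEvictionMbSizeLimit.

/** Minimal sketch (not the HBase code) of the HBASE-23887 auto-scaling decision, one ~10s window at a time. */
class CacheDataBlockPercentSketch {
  int cacheDataBlockPercent = 100; // share of DATA blocks that cacheBlock() accepts
  int heavyEvictionCount = 0;      // consecutive "heavy" windows seen so far

  void onWindowEnd(long freedSumMb, long heavyEvictionMbSizeLimit,
      int heavyEvictionCountLimit, float heavyEvictionOverheadCoefficient) {
    int freedDataOverheadPercent =
        (int) (freedSumMb * 100 / heavyEvictionMbSizeLimit) - 100;
    if (freedDataOverheadPercent > 0) {
      // Above the limit: only react after heavyEvictionCountLimit heavy windows in a row.
      heavyEvictionCount++;
      if (heavyEvictionCount > heavyEvictionCountLimit) {
        int change = (int) (freedDataOverheadPercent * heavyEvictionOverheadCoefficient);
        change = Math.min(15, change); // never cut more than 15 points per window
        change = Math.max(0, change);
        cacheDataBlockPercent = Math.max(1, cacheDataBlockPercent - change);
      }
    } else if (freedSumMb >= heavyEvictionMbSizeLimit * 0.1) {
      // Dipped below the limit, but not by much: cautiously cache more blocks again.
      int change = (int) (-freedDataOverheadPercent * 0.1 + 1);
      cacheDataBlockPercent = Math.min(100, cacheDataBlockPercent + change);
    } else {
      // Heavy reading is over: reset and cache everything again.
      heavyEvictionCount = 0;
      cacheDataBlockPercent = 100;
    }
  }
}

The cacheBlock() side of the feature is then a one-line deterministic filter. Because the last two decimal digits of a block offset behave like a uniform 0-99 bucket, the check below skips roughly (100 - cacheDataBlockPercent) percent of data blocks, and a given block always gets the same verdict until the percent changes - which is what the tests above rely on when they assert on key.getOffset() % 100. Again a hedged sketch, not the HBase code:

/** Sketch of the skip filter applied in cacheBlock() (simplified). */
static boolean wouldCache(long offset, boolean isDataBlock, int cacheDataBlockPercent) {
  if (!isDataBlock || cacheDataBlockPercent == 100) {
    return true; // index/bloom blocks and full-percent mode are never skipped
  }
  return offset % 100 < cacheDataBlockPercent;
}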
From 96dc3e9dbad0ff8bf207bfc563dcd5d4b8545997 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Wed, 10 Jun 2020 08:27:54 +0300 Subject: [PATCH 044/769] Update LruBlockCache.java fixed bug --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index e2e37f205ec5..3dfbb4189e3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1015,6 +1015,7 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { + enteringRun = true; long freedSumMb = 0; int heavyEvictionCount = 0; int freedDataOverheadPercent = 0; From 6b63c74ec146b16129047fcdc8ab3b4991114632 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Thu, 11 Jun 2020 10:18:42 +0300 Subject: [PATCH 045/769] Update LruBlockCache.java simplify --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 3dfbb4189e3c..66980173c91a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1059,7 +1059,7 @@ public void run() { // how to change percent of caching blocks. freedDataOverheadPercent = (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; - if (freedDataOverheadPercent > 0) { + if (freedSumMb > cache.heavyEvictionMbSizeLimit) { // Now we are in the situation when we are above the limit // But maybe we are going to ignore it because it will end quite soon heavyEvictionCount++; From 5e12c0aae59ff5c730ea9e67c51d2b515bb99342 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 18 Sep 2020 13:20:44 -0700 Subject: [PATCH 046/769] HBASE-25061 Update default URL to KEYS file in `hbase-vote.sh` (#2416) Co-authored-by: Viraj Jasani Signed-off-by: Sean Busbey Signed-off-by: Jan Hentschel Signed-off-by: Viraj Jasani --- dev-support/hbase-vote.sh | 4 ++-- src/main/asciidoc/_chapters/developer.adoc | 2 +- src/site/xdoc/downloads.xml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/hbase-vote.sh b/dev-support/hbase-vote.sh index ec9340a0b0e1..88e22849a92f 100755 --- a/dev-support/hbase-vote.sh +++ b/dev-support/hbase-vote.sh @@ -37,7 +37,7 @@ Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file- e.g. https://dist.apache.org/repos/dist/dev/hbase/hbase-RC0/ -k | --key '' A signature of the public key, e.g. 9AD2AE49 -f | --keys-file-url '' the URL of the key file, default is - http://www.apache.org/dist/hbase/KEYS + https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target -P | list of maven profiles to activate for test UT/IT, i.e. 
<-P runSmallTests> Defaults to runAllTests __EOF @@ -103,7 +103,7 @@ BUILD_FROM_SOURCE_PASSED=0 UNIT_TEST_PASSED=0 function download_and_import_keys() { - KEY_FILE_URL="${KEY_FILE_URL:-https://www.apache.org/dist/hbase/KEYS}" + KEY_FILE_URL="${KEY_FILE_URL:-https://downloads.apache.org/hbase/KEYS}" echo "Obtain and import the publisher key(s) from ${KEY_FILE_URL}" # download the keys file into file KEYS wget -O KEYS "${KEY_FILE_URL}" diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index 6987ffd6b637..27c369255459 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -1160,7 +1160,7 @@ Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-f e.g. https://dist.apache.org/repos/dist/dev/hbase/hbase-RC0/ -k | --key '' A signature of the public key, e.g. 9AD2AE49 -f | --keys-file-url '' the URL of the key file, default is - http://www.apache.org/dist/hbase/KEYS + https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target ---- * If you see any unit test failures, please call out the solo test result and whether it's part of flaky (nightly) tests dashboard, e.g. link:https://builds.apache.org/view/H-L/view/HBase/job/HBase-Find-Flaky-Tests/job/master/lastSuccessfulBuild/artifact/dashboard.html[dashboard of master branch] (please change the test branch accordingly). diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 822a819b31d7..c49f09a9177e 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -28,7 +28,7 @@ under the License.

The below table lists mirrored release artifacts and their associated hashes and signatures available ONLY at apache.org. The keys used to sign releases can be found in our published - KEYS file. See + KEYS file. See Verify The Integrity Of The Files for how to verify your mirrored downloads.

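As an aside on the HBASE-23887 work earlier in this series: with the shipped default of Integer.MAX_VALUE for hbase.lru.cache.heavy.eviction.count.limit, the auto-scaling stays dormant until explicitly configured. A hedged sketch of switching it on programmatically follows - the property keys are the ones defined in the patches above, while the concrete values here are illustrative assumptions rather than tuned recommendations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HeavyEvictionTuningSketch {
  public static Configuration withAutoScaling() {
    Configuration conf = HBaseConfiguration.create();
    // React once eviction has been "heavy" for 10 consecutive ~10s windows
    // (the default of Integer.MAX_VALUE effectively disables the feature).
    conf.setInt("hbase.lru.cache.heavy.eviction.count.limit", 10);
    // A window counts as heavy when evict() frees more than 500 MB in it.
    conf.setLong("hbase.lru.cache.heavy.eviction.mb.size.limit", 500);
    // How aggressively cacheDataBlockPercent is reduced per heavy window.
    conf.setFloat("hbase.lru.cache.heavy.eviction.overhead.coefficient", 0.01f);
    return conf;
  }
}

The same keys can of course be set in hbase-site.xml; the programmatic form is shown only to keep the example self-contained.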
From 16831ec51591c1a8d254ba65dcb4c19ccc28b28a Mon Sep 17 00:00:00 2001 From: GeorryHuang <215175212@qq.com> Date: Sat, 19 Sep 2020 14:34:30 +0800 Subject: [PATCH 047/769] HBASE-24857:Fix several problems when starting webUI (#2245) Signed-off-by: Viraj Jasani Signed-off-by: Duo Zhang --- bin/hbase | 2 +- .../java/org/apache/hadoop/hbase/tool/CanaryTool.java | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/bin/hbase b/bin/hbase index 127fa3c7fdd8..dd6cfeef644f 100755 --- a/bin/hbase +++ b/bin/hbase @@ -258,7 +258,7 @@ if [ "${INTERNAL_CLASSPATH}" != "true" ]; then # If command needs our shaded mapreduce, use it # N.B "mapredcp" is not included here because in the shaded case it skips our built classpath - declare -a commands_in_mr_jar=("hbck" "snapshot" "canary" "regionsplitter" "pre-upgrade") + declare -a commands_in_mr_jar=("hbck" "snapshot" "regionsplitter" "pre-upgrade") for c in "${commands_in_mr_jar[@]}"; do if [ "${COMMAND}" = "${c}" ]; then # If we didn't find a jar above, this will just be blank and the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index d42e62991596..59e0e6cc7916 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -140,7 +140,7 @@ private void putUpWebUI() throws IOException { try { InfoServer infoServer = new InfoServer("canary", addr, port, false, conf); infoServer.addUnprivilegedServlet("canary", "/canary-status", CanaryStatusServlet.class); - infoServer.setAttribute("sink", this.sink); + infoServer.setAttribute("sink", getSink(conf, RegionStdOutSink.class)); infoServer.start(); LOG.info("Bind Canary http info server to {}:{} ", addr, port); } catch (BindException e) { @@ -979,8 +979,10 @@ public int run(String[] args) throws Exception { monitorTargets = new String[length]; System.arraycopy(args, index, monitorTargets, 0, length); } - - putUpWebUI(); + if (interval > 0) { + //Only show the web page in daemon mode + putUpWebUI(); + } if (zookeeperMode) { return checkZooKeeper(); } else if (regionServerMode) { From 6371914adb3f320f5ef1fde9bff4ea27cd568ae1 Mon Sep 17 00:00:00 2001 From: Hyeran Lee Date: Sat, 19 Sep 2020 15:36:06 +0900 Subject: [PATCH 048/769] HBASE-25057: Fix typo memeber (#2414) Signed-off-by: Viraj Jasani Signed-off-by: Duo Zhang Signed-off-by: Jan Hentschel --- .../regionserver/LogRollRegionServerProcedureManager.java | 2 +- .../java/org/apache/hadoop/hbase/procedure/Subprocedure.java | 4 ++-- .../flush/RegionServerFlushTableProcedureManager.java | 2 +- .../regionserver/snapshot/RegionServerSnapshotManager.java | 2 +- .../hadoop/hbase/procedure/SimpleRSProcedureManager.java | 2 +- .../apache/hadoop/hbase/procedure/TestProcedureMember.java | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java index 5d087a65f91f..f09e71005598 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -116,7 +116,7 @@ public void stop(boolean force) throws IOException { /** * If in a running 
state, creates the specified subprocedure for handling a backup procedure. - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(byte[] data) { // don't run a backup if the parent is stop(ping) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java index 4b6924438377..9e45ad514369 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java @@ -39,7 +39,7 @@ * member), {@link #insideBarrier()} (execute while globally barriered and release barrier) and * {@link #cleanup(Exception)} (release state associated with subprocedure.) * - * When submitted to a ProcedureMemeber, the call method is executed in a separate thread. + * When submitted to a ProcedureMember, the call method is executed in a separate thread. * Latches are use too block its progress and trigger continuations when barrier conditions are * met. * @@ -147,7 +147,7 @@ private void rethrowException() throws ForeignException { * Execute the Subprocedure {@link #acquireBarrier()} and {@link #insideBarrier()} methods * while keeping some state for other threads to access. * - * This would normally be executed by the ProcedureMemeber when a acquire message comes from the + * This would normally be executed by the ProcedureMember when a acquire message comes from the * coordinator. Rpcs are used to spend message back to the coordinator after different phases * are executed. Any exceptions caught during the execution (except for InterruptedException) get * converted and propagated to coordinator via {@link ProcedureMemberRpcs#sendMemberAborted( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 0a72d9a738a5..1e95d15881fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -132,7 +132,7 @@ public void stop(boolean force) throws IOException { * * @param table * @param family - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(String table, String family) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index cc92003315f2..a01d118718d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -150,7 +150,7 @@ public void stop(boolean force) throws IOException { * the snapshot verification step. * * @param snapshot - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. 
*/ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index cee0656443b7..9ccee661586a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -85,7 +85,7 @@ public String getProcedureSignature() { /** * If in a running state, creates the specified subprocedure for handling a procedure. - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(String name) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java index b95ddf20aad9..61146a6c7070 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java @@ -204,7 +204,7 @@ public void testSendMemberAcquiredCommsFailure() throws Exception { new Answer() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { - throw new IOException("Forced IOException in memeber prepare"); + throw new IOException("Forced IOException in member prepare"); } }).when(mockMemberComms).sendMemberAcquired(any()); @@ -288,7 +288,7 @@ public void testMemberCommitException() throws Exception { new Answer() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { - throw new IOException("Forced IOException in memeber prepare"); + throw new IOException("Forced IOException in member prepare"); } }).when(spySub).insideBarrier(); From 58618e35ad945beac34b6407a7a8d04a9e424439 Mon Sep 17 00:00:00 2001 From: Joseph295 <517536891@qq.com> Date: Sat, 19 Sep 2020 14:53:13 +0800 Subject: [PATCH 049/769] HBASE-24991 Replace MovedRegionsCleaner with guava cache (#2357) Signed-off-by: stack Signed-off-by: Guanghao Zhang --- .../hbase/regionserver/HRegionServer.java | 112 +++--------------- .../hadoop/hbase/TestMovedRegionCache.java | 104 ++++++++++++++++ .../hadoop/hbase/TestMovedRegionsCleaner.java | 95 --------------- .../regionserver/TestRSChoresScheduled.java | 7 -- 4 files changed, 118 insertions(+), 200 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionCache.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index fc0e3d75f592..c33be53d4538 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -277,6 +277,13 @@ public class HRegionServer extends Thread implements private final Cache executedRegionProcedures = CacheBuilder.newBuilder().expireAfterAccess(600, TimeUnit.SECONDS).build(); + /** + * Used to cache the moved-out regions + */ + private final Cache movedRegionInfoCache = + CacheBuilder.newBuilder().expireAfterWrite(movedRegionCacheExpiredTime(), + TimeUnit.MILLISECONDS).build(); + private 
MemStoreFlusher cacheFlusher; private HeapMemoryManager hMemManager; @@ -476,11 +483,6 @@ public class HRegionServer extends Thread implements */ protected String clusterId; - /** - * Chore to clean periodically the moved region list - */ - private MovedRegionsCleaner movedRegionsCleaner; - // chore for refreshing store files for secondary regions private StorefileRefresherChore storefileRefresher; @@ -1079,10 +1081,6 @@ public void run() { mobFileCache.shutdown(); } - if (movedRegionsCleaner != null) { - movedRegionsCleaner.stop("Region Server stopping"); - } - // Send interrupts to wake up threads if sleeping so they notice shutdown. // TODO: Should we check they are alive? If OOME could have exited already if (this.hMemManager != null) this.hMemManager.stop(); @@ -2051,9 +2049,6 @@ private void startServices() throws IOException { if (this.storefileRefresher != null) { choreService.scheduleChore(storefileRefresher); } - if (this.movedRegionsCleaner != null) { - choreService.scheduleChore(movedRegionsCleaner); - } if (this.fsUtilizationChore != null) { choreService.scheduleChore(fsUtilizationChore); } @@ -2111,9 +2106,6 @@ private void initializeThreads() { slowLogTableOpsChore = new SlowLogTableOpsChore(this, duration, this.namedQueueRecorder); } - // Create the thread to clean the moved regions list - movedRegionsCleaner = MovedRegionsCleaner.create(this); - if (this.nonceManager != null) { // Create the scheduled chore that cleans up nonces. nonceManagerChore = this.nonceManager.createCleanupScheduledChore(this); @@ -2614,7 +2606,6 @@ protected void stopServiceThreads() { choreService.cancelChore(healthCheckChore); choreService.cancelChore(executorStatusChore); choreService.cancelChore(storefileRefresher); - choreService.cancelChore(movedRegionsCleaner); choreService.cancelChore(fsUtilizationChore); choreService.cancelChore(slowLogTableOpsChore); // clean up the remaining scheduled chores (in case we missed out any) @@ -3485,12 +3476,10 @@ public ServerNonceManager getNonceManager() { private static class MovedRegionInfo { private final ServerName serverName; private final long seqNum; - private final long moveTime; MovedRegionInfo(ServerName serverName, long closeSeqNum) { this.serverName = serverName; this.seqNum = closeSeqNum; - this.moveTime = EnvironmentEdgeManager.currentTime(); } public ServerName getServerName() { @@ -3500,18 +3489,8 @@ public ServerName getServerName() { public long getSeqNum() { return seqNum; } - - long getMoveTime() { - return moveTime; - } } - /** - * This map will contains all the regions that we closed for a move. - * We add the time it was moved as we don't want to keep too old information - */ - private Map movedRegions = new ConcurrentHashMap<>(3000); - /** * We need a timeout. If not there is a risk of giving a wrong information: this would double * the number of network calls instead of reducing them. 
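The essence of this patch is visible in the hunks on either side of this point: the hand-maintained map of MovedRegionInfo entries, together with its moveTime field and the dedicated MovedRegionsCleaner chore, is replaced by a Guava Cache whose expireAfterWrite policy ages entries out on its own. A minimal sketch of that pattern in isolation, using the unshaded Guava artifact and illustrative names rather than the actual HRegionServer members:

import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class MovedRegionCacheSketch {
  // Stand-in for TIMEOUT_REGION_MOVED; entries evict themselves this long after a write.
  private static final long TIMEOUT_MS = 2 * 60 * 1000;

  private final Cache<String, String> movedRegions =
      CacheBuilder.newBuilder()
        .expireAfterWrite(TIMEOUT_MS, TimeUnit.MILLISECONDS)
        .build();

  void add(String encodedRegionName, String destination) {
    movedRegions.put(encodedRegionName, destination);
  }

  String get(String encodedRegionName) {
    // Returns null once the entry has expired, which is why the patch can drop the
    // explicit timestamp check in getMovedRegion() along with the cleaner chore.
    return movedRegions.getIfPresent(encodedRegionName);
  }

  void remove(String encodedRegionName) {
    movedRegions.invalidate(encodedRegionName);
  }
}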
@@ -3525,86 +3504,23 @@ private void addToMovedRegions(String encodedName, ServerName destination, long } LOG.info("Adding " + encodedName + " move to " + destination + " record at close sequenceid=" + closeSeqNum); - movedRegions.put(encodedName, new MovedRegionInfo(destination, closeSeqNum)); + movedRegionInfoCache.put(encodedName, new MovedRegionInfo(destination, closeSeqNum)); } void removeFromMovedRegions(String encodedName) { - movedRegions.remove(encodedName); - } - - private MovedRegionInfo getMovedRegion(final String encodedRegionName) { - MovedRegionInfo dest = movedRegions.get(encodedRegionName); - - long now = EnvironmentEdgeManager.currentTime(); - if (dest != null) { - if (dest.getMoveTime() > (now - TIMEOUT_REGION_MOVED)) { - return dest; - } else { - movedRegions.remove(encodedRegionName); - } - } - - return null; + movedRegionInfoCache.invalidate(encodedName); } - /** - * Remove the expired entries from the moved regions list. - */ - protected void cleanMovedRegions() { - final long cutOff = System.currentTimeMillis() - TIMEOUT_REGION_MOVED; - - movedRegions.entrySet().removeIf(e -> e.getValue().getMoveTime() < cutOff); + @VisibleForTesting + public MovedRegionInfo getMovedRegion(String encodedRegionName) { + return movedRegionInfoCache.getIfPresent(encodedRegionName); } - /* - * Use this to allow tests to override and schedule more frequently. - */ - - protected int movedRegionCleanerPeriod() { + @VisibleForTesting + public int movedRegionCacheExpiredTime() { return TIMEOUT_REGION_MOVED; } - /** - * Creates a Chore thread to clean the moved region cache. - */ - protected final static class MovedRegionsCleaner extends ScheduledChore implements Stoppable { - private HRegionServer regionServer; - Stoppable stoppable; - - private MovedRegionsCleaner( - HRegionServer regionServer, Stoppable stoppable){ - super("MovedRegionsCleaner for region " + regionServer, stoppable, - regionServer.movedRegionCleanerPeriod()); - this.regionServer = regionServer; - this.stoppable = stoppable; - } - - static MovedRegionsCleaner create(HRegionServer rs){ - Stoppable stoppable = new Stoppable() { - private volatile boolean isStopped = false; - @Override public void stop(String why) { isStopped = true;} - @Override public boolean isStopped() {return isStopped;} - }; - - return new MovedRegionsCleaner(rs, stoppable); - } - - @Override - protected void chore() { - regionServer.cleanMovedRegions(); - } - - @Override - public void stop(String why) { - stoppable.stop(why); - } - - @Override - public boolean isStopped() { - return stoppable.isStopped(); - } - } - private String getMyEphemeralNodePath() { return ZNodePaths.joinZNode(this.zooKeeper.getZNodePaths().rsZNode, getServerName().toString()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionCache.java new file mode 100644 index 000000000000..ea0b9f87e173 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionCache.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import java.io.IOException; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +/** + * Test whether moved region cache is correct + */ +@Category({ MiscTests.class, MediumTests.class }) +public class TestMovedRegionCache { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMovedRegionCache.class); + + @Rule + public TestName name = new TestName(); + + private HBaseTestingUtility UTIL; + private MiniZooKeeperCluster zkCluster; + private HRegionServer source; + private HRegionServer dest; + private RegionInfo movedRegionInfo; + + @Before + public void setup() throws Exception { + UTIL = new HBaseTestingUtility(); + zkCluster = UTIL.startMiniZKCluster(); + StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(2).build(); + MiniHBaseCluster cluster = UTIL.startMiniHBaseCluster(option); + source = cluster.getRegionServer(0); + dest = cluster.getRegionServer(1); + assertEquals(2, cluster.getRegionServerThreads().size()); + TableName tableName = TableName.valueOf(name.getMethodName()); + UTIL.createTable(tableName, Bytes.toBytes("cf")); + UTIL.waitTableAvailable(tableName, 30_000); + movedRegionInfo = Iterables.getOnlyElement(cluster.getRegions(tableName)).getRegionInfo(); + UTIL.getAdmin().move(movedRegionInfo.getEncodedNameAsBytes(), source.getServerName()); + UTIL.waitFor(2000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return source.getOnlineRegion(movedRegionInfo.getRegionName()) != null; + } + }); + } + + @After + public void after() throws Exception { + UTIL.shutdownMiniCluster(); + if (zkCluster != null) { + zkCluster.shutdown(); + } + } + + @Test + public void testMovedRegionsCache() throws IOException, InterruptedException { + UTIL.getAdmin().move(movedRegionInfo.getEncodedNameAsBytes(), dest.getServerName()); + UTIL.waitFor(2000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return dest.getOnlineRegion(movedRegionInfo.getRegionName()) != null; + } + }); + assertNotNull("Moved region NOT in the cache!", source.getMovedRegion( + movedRegionInfo.getEncodedName())); + Thread.sleep(source.movedRegionCacheExpiredTime()); + assertNull("Expired moved region exist in the cache!", source.getMovedRegion( + movedRegionInfo.getEncodedName())); + } +} diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java deleted file mode 100644 index 8932646ef9b9..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test whether background cleanup of MovedRegion entries is happening - */ -@Category({ MiscTests.class, MediumTests.class }) -public class TestMovedRegionsCleaner { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMovedRegionsCleaner.class); - - private final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - - public static int numCalls = 0; - - private static class TestMockRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer { - - public TestMockRegionServer(Configuration conf) throws IOException, InterruptedException { - super(conf); - } - - @Override - protected int movedRegionCleanerPeriod() { - return 500; - } - - @Override protected void cleanMovedRegions() { - // count the number of calls that are being made to this - // - numCalls++; - super.cleanMovedRegions(); - } - } - - @After public void after() throws Exception { - UTIL.shutdownMiniCluster(); - } - - @Before public void before() throws Exception { - UTIL.getConfiguration() - .setStrings(HConstants.REGION_SERVER_IMPL, TestMockRegionServer.class.getName()); - UTIL.startMiniCluster(1); - } - - /** - * Start the cluster, wait for some time and verify that the background - * MovedRegion cleaner indeed gets called - * - * @throws IOException - * @throws InterruptedException - */ - @Test public void testMovedRegionsCleaner() throws IOException, InterruptedException { - // We need to sleep long enough to trigger at least one round of background calls - // to MovedRegionCleaner happen. Currently the period is set to 500ms. 
- // Setting the sleep here for 2s just to be safe - // - UTIL.waitFor(2000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws IOException { - - // verify that there was at least one call to the cleanMovedRegions function - // - return numCalls > 0; - } - }); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java index de078618f829..95ce746ec5f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java @@ -78,13 +78,6 @@ private void testIfChoreScheduled(E choreObj) { @Test public void testDefaultScheduledChores() throws Exception { - // test if movedRegionsCleaner chore is scheduled by default in HRegionServer init - TestChoreField movedRegionsCleanerTestChoreField = - new TestChoreField<>(); - HRegionServer.MovedRegionsCleaner movedRegionsCleaner = movedRegionsCleanerTestChoreField - .getChoreObj("movedRegionsCleaner"); - movedRegionsCleanerTestChoreField.testIfChoreScheduled(movedRegionsCleaner); - // test if compactedHFilesDischarger chore is scheduled by default in HRegionServer init TestChoreField compactedHFilesDischargerTestChoreField = new TestChoreField<>(); From 4c9a92de28a8d1bbe164b09cf0988c74bae033f7 Mon Sep 17 00:00:00 2001 From: KevinSmile Date: Sat, 19 Sep 2020 19:45:10 +0530 Subject: [PATCH 050/769] HBASE-24481 REST - Fix incorrect response code of get-regions in rest api Closes #2425 Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/rest/RegionsResource.java | 3 +++ .../org/apache/hadoop/hbase/rest/TestTableResource.java | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index c3eed6a2eecd..6d6293fb1647 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -75,6 +75,9 @@ public Response get(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); try { TableName tableName = TableName.valueOf(tableResource.getName()); + if (!tableResource.exists()) { + throw new TableNotFoundException(tableName); + } TableInfoModel model = new TableInfoModel(tableName.getNameAsString()); List locs; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index 0bece66df380..0c83a7fdeabc 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -261,5 +261,14 @@ public void testTableInfoPB() throws IOException, JAXBException { checkTableInfo(model); } + @Test + public void testTableNotFound() throws IOException { + String notExistTable = "notexist"; + Response response1 = client.get("/" + notExistTable + "/schema", Constants.MIMETYPE_JSON); + assertEquals(404, response1.getCode()); + Response response2 = client.get("/" + notExistTable + "/regions", Constants.MIMETYPE_XML); + assertEquals(404, response2.getCode()); + } + } From fd7260b506be2ce7abed8a79e0b7e9f05fc7c248 Mon Sep 17 00:00:00 2001 From: Mohammad Arshad Date: Sun, 20 Sep 2020 06:30:28 +0530 
Subject: [PATCH 051/769] HBASE-25069: Display region name instead of encoded region name in HBCK report page. (#2428) Signed-off-by: Guanghao Zhang --- .../main/resources/hbase-webapps/master/hbck.jsp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp index 12757c6ad5ee..69b95e1a118e 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp @@ -247,8 +247,8 @@ <% for (Pair p : report.getHoles()) { %> - <%= p.getFirst().getEncodedName() %> - <%= p.getSecond().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> + <%= p.getSecond().getRegionNameAsString() %> <% } %> @@ -275,14 +275,14 @@ <% for (Pair p : report.getOverlaps()) { %> <% if (report.getMergedRegions().containsKey(p.getFirst())) { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <% } else { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <% } %> <% if (report.getMergedRegions().containsKey(p.getSecond())) { %> - <%= p.getSecond().getEncodedName() %> + <%= p.getSecond().getRegionNameAsString() %> <% } else { %> - <%= p.getSecond().getEncodedName() %> + <%= p.getSecond().getRegionNameAsString() %> <% } %> <% } %> @@ -318,7 +318,7 @@ <% for (Pair p: report.getUnknownServers()) { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <%= p.getSecond() %> <% } %> From 0956c34658c8486fea6bf691cef82abb1a82f5f4 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 20 Sep 2020 11:25:22 +0800 Subject: [PATCH 052/769] HBASE-25066 Use FutureUtils.rethrow in AsyncTableResultScanner to better catch the stack trace (#2420) Signed-off-by: Guanghao Zhang --- .../hbase/client/AsyncTableResultScanner.java | 9 +++--- .../apache/hadoop/hbase/util/FutureUtils.java | 32 +++++++++++-------- .../hadoop/hbase/util/TestFutureUtils.java | 2 +- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java index cd5d5adb290a..7fe6d120c3f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java @@ -23,13 +23,13 @@ import java.io.InterruptedIOException; import java.util.ArrayDeque; import java.util.Queue; - +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.metrics.ScanMetrics; + import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; /** * The {@link ResultScanner} implementation for {@link AsyncTable}. 
It will fetch data automatically @@ -140,8 +140,7 @@ public synchronized Result next() throws IOException { return null; } if (error != null) { - Throwables.propagateIfPossible(error, IOException.class); - throw new IOException(error); + FutureUtils.rethrow(error); } try { wait(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java index dfd9ead27854..67a7d84b26fe 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java @@ -139,19 +139,23 @@ private static void setStackTrace(Throwable error) { error.setStackTrace(newStackTrace); } - private static IOException rethrow(ExecutionException error) throws IOException { - Throwable cause = error.getCause(); - if (cause instanceof IOException) { - setStackTrace(cause); - throw (IOException) cause; - } else if (cause instanceof RuntimeException) { - setStackTrace(cause); - throw (RuntimeException) cause; - } else if (cause instanceof Error) { - setStackTrace(cause); - throw (Error) cause; + /** + * If we could propagate the given {@code error} directly, we will fill the stack trace with the + * current thread's stack trace so it is easier to trace where is the exception thrown. If not, we + * will just create a new IOException and then throw it. + */ + public static IOException rethrow(Throwable error) throws IOException { + if (error instanceof IOException) { + setStackTrace(error); + throw (IOException) error; + } else if (error instanceof RuntimeException) { + setStackTrace(error); + throw (RuntimeException) error; + } else if (error instanceof Error) { + setStackTrace(error); + throw (Error) error; } else { - throw new IOException(cause); + throw new IOException(error); } } @@ -165,7 +169,7 @@ public static T get(Future future) throws IOException { } catch (InterruptedException e) { throw (IOException) new InterruptedIOException().initCause(e); } catch (ExecutionException e) { - throw rethrow(e); + throw rethrow(e.getCause()); } } @@ -179,7 +183,7 @@ public static T get(Future future, long timeout, TimeUnit unit) throws IO } catch (InterruptedException e) { throw (IOException) new InterruptedIOException().initCause(e); } catch (ExecutionException e) { - throw rethrow(e); + throw rethrow(e.getCause()); } catch (TimeoutException e) { throw new TimeoutIOException(e); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestFutureUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestFutureUtils.java index 0eef0a6fc51d..f09d94739724 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestFutureUtils.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestFutureUtils.java @@ -19,8 +19,8 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; From 3e13b3f0598c48014bc2737374dce309bcc351da Mon Sep 17 00:00:00 2001 From: stack Date: Sun, 20 Sep 2020 14:15:34 +0530 Subject: [PATCH 053/769] HBASE-24896 'Stuck' in static initialization creating RegionInfo instance Closes #2422 Untangle RegionInfo, RegionInfoBuilder, and MutableRegionInfo static initializations some. 
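The failure mode being untangled is the classic class-initialization deadlock: two classes whose static initializers reference each other can hang the JVM when two threads trigger their loading concurrently, each thread holding one class's initialization lock while waiting on the other's. A minimal sketch with throwaway classes (not the real RegionInfo types):

public class ClinitDeadlockSketch {
  static class A {
    // Initializing A calls into B, so A's <clinit> needs B's init lock.
    static final String NAME = B.describe();
  }
  static class B {
    // Initializing B reads A.NAME, so B's <clinit> needs A's init lock.
    static final String NAME = A.NAME + "-b";
    static String describe() {
      return "b-sees-" + NAME;
    }
  }
  public static void main(String[] args) {
    // Touch A and B from different threads at roughly the same moment; with
    // unlucky timing each thread blocks forever on the other's class lock.
    new Thread(() -> System.out.println(A.NAME)).start();
    new Thread(() -> System.out.println(B.NAME)).start();
  }
}

Run on a single thread this merely observes a default value during recursive initialization; it is the concurrent case that hangs, which is why the new test below exercises both symbols from parallel tasks.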
Move MutableRegionInfo from inner-class of RegionInfoBuilder to be (package private) standalone. Undo static initializing references from RI to RIB. Co-authored-by: Nick Dimiduk Signed-off-by: Bharath Vissapragada Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../hadoop/hbase/client/RegionInfo.java | 9 ++- .../TestRegionInfoStaticInitialization.java | 70 +++++++++++++++++++ 2 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 7a3a9af227f7..493b389b72aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -71,7 +71,9 @@ public interface RegionInfo extends Comparable { */ @Deprecated @InterfaceAudience.Private - RegionInfo UNDEFINED = RegionInfoBuilder.newBuilder(TableName.valueOf("__UNDEFINED__")).build(); + // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24627 + RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), + RegionInfo.DEFAULT_REPLICA_ID); /** * Separator used to demarcate the encodedName in a region name @@ -588,8 +590,9 @@ static String prettyPrint(final String encodedRegionName) { * @return the MOB {@link RegionInfo}. */ static RegionInfo createMobRegionInfo(TableName tableName) { - return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")). - setRegionId(0).build(); + // Skipping reference to RegionInfoBuilder in this class. + return new MutableRegionInfo(tableName, Bytes.toBytes(".mob"), + HConstants.EMPTY_END_ROW, false, 0, DEFAULT_REPLICA_ID, false); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java new file mode 100644 index 000000000000..48729faae3ef --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Stream; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test for the tangled mess that is static initialization of our our {@link RegionInfo} and + * {@link RegionInfoBuilder}, as reported on HBASE-24896. The condition being tested can only be + * reproduced the first time a JVM loads the classes under test. Thus, this test is marked as a + * {@link LargeTests} because, under their current configuration, tests in that category are run + * in their own JVM instances. + */ +@SuppressWarnings("deprecation") +@Category({ RegionServerTests.class, LargeTests.class}) +public class TestRegionInfoStaticInitialization { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionInfoStaticInitialization.class); + + @Test + public void testParallelStaticInitialization() throws Exception { + // The JVM loads symbols lazily. These suppliers reference two symbols that, before this patch, + // are mutually dependent and expose a deadlock in the loading of symbols from RegionInfo and + // RegionInfoBuilder. + final Supplier retrieveUNDEFINED = () -> RegionInfo.UNDEFINED; + final Supplier retrieveMetaRegionInfo = + () -> RegionInfoBuilder.FIRST_META_REGIONINFO; + + // The test runs multiple threads that reference these mutually dependent symbols. In order to + // express this bug, these threads need to access these symbols at roughly the same time, so + // that the classloader is asked to materialize these symbols concurrently. These Suppliers are + // run on threads that have already been allocated, managed by the system's ForkJoin pool. + final CompletableFuture[] futures = Stream.of( + retrieveUNDEFINED, retrieveMetaRegionInfo, retrieveUNDEFINED, retrieveMetaRegionInfo) + .map(CompletableFuture::supplyAsync) + .toArray(CompletableFuture[]::new); + + // Loading classes should be relatively fast. 5 seconds is an arbitrary choice of timeout. It + // was chosen under the assumption that loading these symbols should complete much faster than + // this window. 
+ CompletableFuture.allOf(futures).get(5, TimeUnit.SECONDS); + } +} From d41eb4f0f47c8ae7a7721b3b9d6aaf8f934405f1 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sun, 20 Sep 2020 14:31:22 +0530 Subject: [PATCH 054/769] HBASE-24896 : Jira number correction in comment (ADDENDUM) --- .../main/java/org/apache/hadoop/hbase/client/RegionInfo.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 493b389b72aa..d860c7681a37 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -71,7 +71,7 @@ public interface RegionInfo extends Comparable { */ @Deprecated @InterfaceAudience.Private - // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24627 + // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24896 RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), RegionInfo.DEFAULT_REPLICA_ID); From b5adfe10314057e4a25a76b6c5bd0f1c4832682d Mon Sep 17 00:00:00 2001 From: niuyulin Date: Mon, 21 Sep 2020 02:08:43 +0800 Subject: [PATCH 055/769] HBASE-25072 Remove the unnecessary System.out.println in MasterRegistry (#2429) Co-authored-by: niuyulin Signed-off-by: Jan Hentschel Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/client/MasterRegistry.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index 2a7ae16df47a..06582684c79c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -325,7 +325,6 @@ private static List transformServerNames(GetMastersResponse resp) { } CompletableFuture> getMasters() { - System.out.println("getMasters()"); return this . 
call((c, s, d) -> s.getMasters( c, GetMastersRequest.getDefaultInstance(), d), r -> r.getMasterServersCount() != 0, @@ -346,4 +345,4 @@ public void close() { rpcClient.close(); } } -} \ No newline at end of file +} From 8efd2509e9f4d44a699f0ad0e50c0b7ca7ad2b2a Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Mon, 21 Sep 2020 08:25:58 +0800 Subject: [PATCH 056/769] HBASE-24976 Printing the swallowed exception Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani Signed-off-by: Guanghao Zhang --- .../java/org/apache/hadoop/hbase/rest/RESTServer.java | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 2ad57e1b742c..c6f769ee6054 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -382,13 +382,8 @@ public synchronized void run() throws Exception { this.infoServer.setAttribute("hbase.conf", conf); this.infoServer.start(); } - try { - // start server - server.start(); - } catch (Exception e) { - LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); - throw e; - } + // start server + server.start(); } public synchronized void join() throws Exception { @@ -442,6 +437,7 @@ public static void main(String[] args) throws Exception { server.run(); server.join(); } catch (Exception e) { + LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); System.exit(1); } From 8c11007b7c1b59c2fd3d6e6aa1e12f997dd90455 Mon Sep 17 00:00:00 2001 From: Guanghao Zhang Date: Mon, 21 Sep 2020 08:27:38 +0800 Subject: [PATCH 057/769] Revert "HBASE-24976 Printing the swallowed exception" This reverts commit 8efd2509e9f4d44a699f0ad0e50c0b7ca7ad2b2a. 
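Patches 056 through 058 are a revert-and-reapply of the same one-line move, with only the commit subject corrected in between (patch 058 below restores the change). The underlying problem is visible in the diffs: the try/catch inside run() wrapped only server.start(), so a failure earlier in startup (for example while bringing up the info server) propagated to main(), whose catch block called System.exit(1) without logging anything. Moving the FATAL log into main() makes every startup failure visible. A self-contained sketch of the resulting shape, with stand-in names rather than the real RESTServer:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StartupLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(StartupLoggingSketch.class);

  // Stand-in for RESTServer startup: any step may throw, not just server.start().
  static void runServer() throws Exception {
    throw new IllegalStateException("simulated failure before server.start()");
  }

  public static void main(String[] args) {
    try {
      runServer();
    } catch (Exception e) {
      // Log once at the outermost entry point, then exit non-zero; startup
      // failures are reported exactly once instead of being swallowed.
      LOG.error("Failed to start server", e);
      System.exit(1);
    }
  }
}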
--- .../java/org/apache/hadoop/hbase/rest/RESTServer.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index c6f769ee6054..2ad57e1b742c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -382,8 +382,13 @@ public synchronized void run() throws Exception { this.infoServer.setAttribute("hbase.conf", conf); this.infoServer.start(); } - // start server - server.start(); + try { + // start server + server.start(); + } catch (Exception e) { + LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); + throw e; + } } public synchronized void join() throws Exception { @@ -437,7 +442,6 @@ public static void main(String[] args) throws Exception { server.run(); server.join(); } catch (Exception e) { - LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); System.exit(1); } From 773185670bae57e044b5d11713cd796dfab1e198 Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Mon, 21 Sep 2020 08:25:58 +0800 Subject: [PATCH 058/769] HBASE-24976 REST Server failes to start without any error message Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani Signed-off-by: Guanghao Zhang --- .../java/org/apache/hadoop/hbase/rest/RESTServer.java | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 2ad57e1b742c..c6f769ee6054 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -382,13 +382,8 @@ public synchronized void run() throws Exception { this.infoServer.setAttribute("hbase.conf", conf); this.infoServer.start(); } - try { - // start server - server.start(); - } catch (Exception e) { - LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); - throw e; - } + // start server + server.start(); } public synchronized void join() throws Exception { @@ -442,6 +437,7 @@ public static void main(String[] args) throws Exception { server.run(); server.join(); } catch (Exception e) { + LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); System.exit(1); } From 36e8b0beec29a9ed01b92ea89c77b89bbeea94a7 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Tue, 22 Sep 2020 00:44:47 +0800 Subject: [PATCH 059/769] HBASE-25075 Fix typo in ReplicationProtobufUtil (#2431) Co-authored-by: niuyulin Signed-off-by: Jan Hentschel Signed-off-by: Guanghao Zhang Signed-off-by: Duo Zhang --- .../client/AsyncRegionReplicaReplayRetryingCaller.java | 4 ++-- ...icationProtbufUtil.java => ReplicationProtobufUtil.java} | 2 +- .../regionserver/HBaseInterClusterReplicationEndpoint.java | 4 ++-- .../regionserver/ReplaySyncReplicationWALCallable.java | 4 ++-- .../hadoop/hbase/protobuf/TestReplicationProtobuf.java | 2 +- .../hadoop/hbase/replication/SyncReplicationTestBase.java | 6 +++--- 6 files changed, 11 insertions(+), 11 deletions(-) rename hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/{ReplicationProtbufUtil.java => ReplicationProtobufUtil.java} (99%) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java 
index 91d950265db8..0146c8b94d2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -104,7 +104,7 @@ private void call(HRegionLocation loc) { err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } - Pair p = ReplicationProtbufUtil + Pair p = ReplicationProtobufUtil .buildReplicateWALEntryRequest(entries, encodedRegionName, null, null, null); resetCallTimeout(); controller.setCellScanner(p.getSecond()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java similarity index 99% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java index 4e2e5779303f..e47c92914f0d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; @InterfaceAudience.Private -public class ReplicationProtbufUtil { +public class ReplicationProtobufUtil { /** * A helper to replicate a list of WAL entries using region server admin diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 816345f629d3..4e0669c2e9fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -56,7 +56,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; @@ -632,7 +632,7 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) sinkPeer = replicationSinkMgr.getReplicationSink(); AsyncRegionServerAdmin rsAdmin = sinkPeer.getRegionServer(); try { - ReplicationProtbufUtil.replicateWALEntry(rsAdmin, + ReplicationProtobufUtil.replicateWALEntry(rsAdmin, entries.toArray(new Entry[entries.size()]), replicationClusterId, baseNamespaceDir, hfileArchiveDir, timeout); if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index 907faba3e404..e03bbe2b1c65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.util.KeyLocker; @@ -123,7 +123,7 @@ private void replayWAL(String wal) throws IOException { try (Reader reader = getReader(wal)) { List entries = readWALEntries(reader); while (!entries.isEmpty()) { - Pair pair = ReplicationProtbufUtil + Pair pair = ReplicationProtobufUtil .buildReplicateWALEntryRequest(entries.toArray(new Entry[entries.size()])); ReplicateWALEntryRequest request = pair.getFirst(); rs.getReplicationSinkService().replicateLogEntries(request.getEntryList(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java index 7b0e6cbdd8f1..615fa6445227 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java @@ -63,7 +63,7 @@ public void testGetCellScanner() throws IOException { all.add(a); all.add(b); all.add(c); - CellScanner scanner = ReplicationProtbufUtil.getCellScanner(all, 0); + CellScanner scanner = ReplicationProtobufUtil.getCellScanner(all, 0); testAdvancetHasSameRow(scanner, akv); // Skip over aa scanner.advance(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java index f11bd498bb1c..23753e211054 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.MasterFileSystem; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; @@ -266,12 +266,12 @@ protected final void verifyReplicationRequestRejection(HBaseTestingUtility utili new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit()); } if (!expectedRejection) { - ReplicationProtbufUtil.replicateWALEntry( + ReplicationProtobufUtil.replicateWALEntry( connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null, HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT); } else { try { - ReplicationProtbufUtil.replicateWALEntry( + ReplicationProtobufUtil.replicateWALEntry( 
connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null, HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT); fail("Should throw IOException when sync-replication state is in A or DA"); From 8a6299bb387aaedcdab05224a462e63cd121fbb0 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Tue, 22 Sep 2020 01:00:49 +0800 Subject: [PATCH 060/769] HBASE-25076 fix typo in MasterRegion java doc (#2432) Co-authored-by: niuyulin Signed-off-by: Jan Hentschel Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/master/region/MasterRegion.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java index aa1b9d1257ea..81da59d6b665 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java @@ -79,9 +79,9 @@ * Notice that, you can use different root file system and WAL file system. Then the above directory * will be on two file systems, the root file system will have the data directory while the WAL * filesystem will have the WALs directory. The archived HFile will be moved to the global HFile - * archived directory with the {@link MasterRegionParams#archivedWalSuffix()} suffix. The archived + * archived directory with the {@link MasterRegionParams#archivedHFileSuffix()} suffix. The archived * WAL will be moved to the global WAL archived directory with the - * {@link MasterRegionParams#archivedHFileSuffix()} suffix. + * {@link MasterRegionParams#archivedWalSuffix()} suffix. */ @InterfaceAudience.Private public final class MasterRegion { From d1a3b660749cc8f1ad699795df5c5b556c9efc65 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 22 Sep 2020 17:53:15 +0200 Subject: [PATCH 061/769] HBASE-25081 Up the container nproc uplimit to 30000 (#2439) --- dev-support/hbase-personality.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index d7ca64cbb742..6f1355cf31a1 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -80,9 +80,9 @@ function personality_globals # TODO use PATCH_BRANCH to select jdk versions to use. # Yetus 0.7.0 enforces limits. Default proclimit is 1000. - # Up it. See HBASE-19902 for how we arrived at this number. + # Up it. See HBASE-25081 for how we arrived at this number. #shellcheck disable=SC2034 - PROC_LIMIT=12500 + PROC_LIMIT=30000 # Set docker container to run with 20g. Default is 4g in yetus. # See HBASE-19902 for how we arrived at 20g. From 70a947dc6b6e3e723a29fb7ce003613f776e09c6 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Tue, 22 Sep 2020 10:10:31 -0700 Subject: [PATCH 062/769] HBASE-25067 Edit of log messages around async WAL Replication; checkstyle fixes; and a bugfix (#2435) Editing logging around region replicas: shortening and adding context. Checkstyle fixes in edited files while I was in there. 
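One edit in this batch benefits from being seen outside the diff: flushcache() gains a local boolean so that the per-flush status journal is only logged when a flush actually ran, rather than printing a confusing journal on the early CANNOT_FLUSH returns. A simplified, self-contained sketch of the guard; the method shape and messages are illustrative, not the real HRegion code:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FlushJournalGuardSketch {
  private static final Logger LOG = LoggerFactory.getLogger(FlushJournalGuardSketch.class);

  String flush(boolean closed, boolean alreadyFlushing) {
    boolean flushed = true; // assume success; early returns flip it off
    try {
      if (closed) {
        flushed = false;
        return "CANNOT_FLUSH: region closed";
      }
      if (alreadyFlushing) {
        flushed = false;
        return "CANNOT_FLUSH: already flushing";
      }
      // ... the actual memstore snapshot and store file write would happen here ...
      return "FLUSHED";
    } finally {
      if (flushed) {
        // The real method logs status.prettyPrintJournal() here; skipping it on
        // the early-return paths is the whole point of the guard.
        LOG.debug("Flush status journal: ...");
      }
    }
  }
}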
Signed-off-by: Duo Zhang --- .../replication/ReplicationPeerImpl.java | 4 +- .../apache/hadoop/hbase/master/HMaster.java | 2 +- .../procedure/EnableTableProcedure.java | 4 +- .../hadoop/hbase/regionserver/HRegion.java | 23 +++++----- .../hbase/regionserver/HRegionServer.java | 13 +++--- .../handler/AssignRegionHandler.java | 11 +++-- .../handler/RegionReplicaFlushHandler.java | 31 ++++++------- .../handler/UnassignRegionHandler.java | 14 +++--- .../regionserver/wal/ProtobufLogReader.java | 4 +- .../regionserver/ReplicationSource.java | 43 +++++++++---------- .../regionserver/WALEntryStream.java | 12 +++--- .../hbase/zookeeper/MetaTableLocator.java | 6 +-- 12 files changed, 82 insertions(+), 85 deletions(-) diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java index 22026e5284ce..08799856b754 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig SyncReplicationState newSyncReplicationState) { this.conf = conf; this.id = id; - this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED; + setPeerState(peerState); this.peerConfig = peerConfig; this.syncReplicationStateBits = syncReplicationState.value() | (newSyncReplicationState.value() << SHIFT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index c87f144fc876..e4bd3c5fce22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1787,7 +1787,7 @@ public boolean balance(boolean force) throws IOException { toPrint = regionsInTransition.subList(0, max); truncated = true; } - LOG.info(prefix + "unning balancer because " + regionsInTransition.size() + + LOG.info(prefix + " not running balancer because " + regionsInTransition.size() + " region(s) in transition: " + toPrint + (truncated? "(truncated list)": "")); if (!force || metaInTransition) return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index 8ad3ae6d33c6..6ca83fe01efe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -142,9 +142,9 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS } } else { // the replicasFound is less than the regionReplication - LOG.info("Number of replicas has increased. Assigning new region replicas." + + LOG.info("Number of replicas has increased for {}. Assigning new region replicas." + "The previous replica count was {}. 
The current replica count is {}.", - (currentMaxReplica + 1), configuredReplicaCount); + this.tableName, (currentMaxReplica + 1), configuredReplicaCount); regionsOfTable = RegionReplicaUtil.addReplicas(regionsOfTable, currentMaxReplica + 1, configuredReplicaCount); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index a09151564356..a208d9330042 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -2438,11 +2438,13 @@ public FlushResultImpl flushcache(List families, status.setStatus("Acquiring readlock on region"); // block waiting for the lock for flushing cache lock.readLock().lock(); + boolean flushed = true; try { if (this.closed.get()) { String msg = "Skipping flush on " + this + " because closed"; LOG.debug(msg); status.abort(msg); + flushed = false; return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } if (coprocessorHost != null) { @@ -2459,15 +2461,11 @@ public FlushResultImpl flushcache(List families, if (!writestate.flushing && writestate.writesEnabled) { this.writestate.flushing = true; } else { - if (LOG.isDebugEnabled()) { - LOG.debug("NOT flushing memstore for region " + this - + ", flushing=" + writestate.flushing + ", writesEnabled=" - + writestate.writesEnabled); - } - String msg = "Not flushing since " - + (writestate.flushing ? "already flushing" - : "writes not enabled"); + String msg = "NOT flushing " + this + " as " + (writestate.flushing ? "already flushing" + : "writes are not enabled"); + LOG.debug(msg); status.abort(msg); + flushed = false; return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } } @@ -2505,8 +2503,11 @@ public FlushResultImpl flushcache(List families, } } finally { lock.readLock().unlock(); - LOG.debug("Flush status journal for {}:\n{}", this.getRegionInfo().getEncodedName(), - status.prettyPrintJournal()); + if (flushed) { + // Don't log this journal stuff if no flush -- confusing. + LOG.debug("Flush status journal for {}:\n{}", this.getRegionInfo().getEncodedName(), + status.prettyPrintJournal()); + } status.cleanup(); } } @@ -5032,7 +5033,7 @@ protected void checkReadsEnabled() throws IOException { public void setReadsEnabled(boolean readsEnabled) { if (readsEnabled && !this.writestate.readsEnabled) { - LOG.info(getRegionInfo().getEncodedName() + " : Enabling reads for region."); + LOG.info("Enabling reads for {}", getRegionInfo().getEncodedName()); } this.writestate.setReadsEnabled(readsEnabled); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index c33be53d4538..f14da2f6a17e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -123,6 +123,8 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.mob.MobFileCache; +import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.namequeues.SlowLogTableOpsChore; import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore; @@ -139,8 +141,6 @@ import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler; import org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler; import org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler; -import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; -import org.apache.hadoop.hbase.namequeues.SlowLogTableOpsChore; import org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; @@ -2466,10 +2466,13 @@ private void triggerFlushInPrimaryRegion(final HRegion region) { region.setReadsEnabled(false); // disable reads before marking the region as opened. // RegionReplicaFlushHandler might reset this. - // submit it to be handled by one of the handlers so that we do not block OpenRegionHandler + // Submit it to be handled by one of the handlers so that we do not block OpenRegionHandler if (this.executorService != null) { this.executorService.submit(new RegionReplicaFlushHandler(this, region)); - } + } else { + LOG.info("Executor is null; not running flush of primary region replica for {}", + region.getRegionInfo()); + } } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java index 737f1653bc94..98d09b20e879 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java @@ -92,7 +92,7 @@ public void process() throws IOException { String regionName = regionInfo.getRegionNameAsString(); Region onlineRegion = rs.getRegion(encodedName); if (onlineRegion != null) { - LOG.warn("Received OPEN for the region:{}, which is already online", regionName); + LOG.warn("Received OPEN for {} which is already online", regionName); // Just follow the old behavior, do we need to call reportRegionStateTransition? Maybe not? // For normal case, it could happen that the rpc call to schedule this handler is succeeded, // but before returning to master the connection is broken. And when master tries again, we @@ -104,7 +104,7 @@ public void process() throws IOException { if (previous != null) { if (previous) { // The region is opening and this maybe a retry on the rpc call, it is safe to ignore it. - LOG.info("Receiving OPEN for the region:{}, which we are already trying to OPEN" + + LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + " - ignoring this new request for this region.", regionName); } else { // The region is closing. This is possible as we will update the region state to CLOSED when @@ -113,7 +113,7 @@ public void process() throws IOException { // closing process. 
long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); LOG.info( - "Receiving OPEN for the region:{}, which we are trying to close, try again after {}ms", + "Receiving OPEN for {} which we are trying to close, try again after {}ms", regionName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } @@ -145,11 +145,10 @@ public void process() throws IOException { Boolean current = rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes()); if (current == null) { // Should NEVER happen, but let's be paranoid. - LOG.error("Bad state: we've just opened a region that was NOT in transition. Region={}", - regionName); + LOG.error("Bad state: we've just opened {} which was NOT in transition", regionName); } else if (!current) { // Should NEVER happen, but let's be paranoid. - LOG.error("Bad state: we've just opened a region that was closing. Region={}", regionName); + LOG.error("Bad state: we've just opened {} which was closing", regionName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java index cc798cc2443f..dddf5532442c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,9 +39,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; /** - * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to wal in + * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to WAL in * secondary region replicas. This means that a secondary region replica can serve some edits from - * it's memstore that that is still not flushed from primary. We do not want to allow secondary + * it's memstore that are still not flushed from primary. We do not want to allow secondary * region's seqId to go back in time, when this secondary region is opened elsewhere after a * crash or region move. We will trigger a flush cache in the primary region replica and wait * for observing a complete flush cycle before marking the region readsEnabled. This handler does @@ -50,7 +50,6 @@ */ @InterfaceAudience.Private public class RegionReplicaFlushHandler extends EventHandler { - private static final Logger LOG = LoggerFactory.getLogger(RegionReplicaFlushHandler.class); private final AsyncClusterConnection connection; @@ -73,7 +72,7 @@ protected void handleException(Throwable t) { if (t instanceof InterruptedIOException || t instanceof InterruptedException) { LOG.error("Caught throwable while processing event " + eventType, t); } else if (t instanceof RuntimeException) { - server.abort("ServerAborting because a runtime exception was thrown", t); + server.abort("Server aborting", t); } else { // something fishy since we cannot flush the primary region until all retries (retries from // rpc times 35 trigger). 
We cannot close the region since there is no such mechanism to @@ -101,9 +100,9 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { RetryCounter counter = new RetryCounterFactory(maxAttempts, (int)pause).create(); if (LOG.isDebugEnabled()) { - LOG.debug("Attempting to do an RPC to the primary region replica " + ServerRegionReplicaUtil - .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " - + region.getRegionInfo().getEncodedName() + " to trigger a flush"); + LOG.debug("RPC'ing to primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + " from " + + region.getRegionInfo() + " to trigger FLUSH"); } while (!region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped()) { @@ -142,11 +141,11 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { // then we have to wait for seeing the flush entry. All reads will be rejected until we see // a complete flush cycle or replay a region open event if (LOG.isDebugEnabled()) { - LOG.debug("Successfully triggered a flush of primary region replica " + + LOG.debug("Triggered flush of primary region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) .getRegionNameAsString() + - " of region " + region.getRegionInfo().getRegionNameAsString() + - " Now waiting and blocking reads until observing a full flush cycle"); + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until completes a full flush cycle"); } region.setReadsEnabled(true); break; @@ -154,12 +153,10 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { if (response.hasWroteFlushWalMarker()) { if (response.getWroteFlushWalMarker()) { if (LOG.isDebugEnabled()) { - LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary " + - "region replica " + - ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) - .getRegionNameAsString() + - " of region " + region.getRegionInfo().getRegionNameAsString() + - " Now waiting and " + "blocking reads until observing a flush marker"); + LOG.debug("Triggered empty flush marker (memstore empty) on primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()). + getRegionNameAsString() + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until observing a flush marker"); } region.setReadsEnabled(true); break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java index 8b275d0e6ed1..0bf2543a445a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -84,19 +84,18 @@ public void process() throws IOException { // reportRegionStateTransition, so the HMaster will think the region is online, before we // actually open the region, as reportRegionStateTransition is part of the opening process. 
long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Received CLOSE for the region: {}, which we are already " + - "trying to OPEN. try again after {}ms", encodedName, backoff); + LOG.warn("Received CLOSE for {} which we are already " + + "trying to OPEN; try again after {}ms", encodedName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } else { - LOG.info("Received CLOSE for the region: {}, which we are already trying to CLOSE," + + LOG.info("Received CLOSE for {} which we are already trying to CLOSE," + " but not completed yet", encodedName); } return; } HRegion region = rs.getRegion(encodedName); if (region == null) { - LOG.debug( - "Received CLOSE for a region {} which is not online, and we're not opening/closing.", + LOG.debug("Received CLOSE for {} which is not ONLINE and we're not opening/closing.", encodedName); rs.getRegionsInTransitionInRS().remove(encodedNameBytes, Boolean.FALSE); return; @@ -114,10 +113,11 @@ public void process() throws IOException { if (region.close(abort) == null) { // XXX: Is this still possible? The old comment says about split, but now split is done at // master side, so... - LOG.warn("Can't close region {}, was already closed during close()", regionName); + LOG.warn("Can't close {} already closed during close()", regionName); rs.getRegionsInTransitionInRS().remove(encodedNameBytes, Boolean.FALSE); return; } + rs.removeRegion(region, destination); if (!rs.reportRegionStateTransition( new RegionStateTransitionContext(TransitionCode.CLOSED, HConstants.NO_SEQNUM, closeProcId, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index 6f537df94900..0967c101ce58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -412,14 +412,14 @@ protected boolean readNext(Entry entry) throws IOException { + "because originalPosition is negative. last offset={}", this.inputStream.getPos(), eof); throw eof; } - // If stuck at the same place and we got and exception, lets go back at the beginning. + // If stuck at the same place and we got an exception, lets go back at the beginning. 
if (inputStream.getPos() == originalPosition) { if (resetPosition) { LOG.warn("Encountered a malformed edit, seeking to the beginning of the WAL since " + "current position and original position match at {}", originalPosition); seekOnFs(0); } else { - LOG.debug("Reached the end of file at position {}", originalPosition); + LOG.debug("EOF at position {}", originalPosition); } } else { // Else restore our position to original location in hope that next time through we will diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index dc0276dc7075..cb9a14d46b28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -38,7 +38,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Predicate; - import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; @@ -249,34 +248,35 @@ public void enqueueLog(Path wal) { LOG.trace("NOT replicating {}", wal); return; } - String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getName()); - PriorityBlockingQueue queue = queues.get(logPrefix); + // Use WAL prefix as the WALGroupId for this peer. + String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getName()); + PriorityBlockingQueue queue = queues.get(walPrefix); if (queue == null) { queue = new PriorityBlockingQueue<>(queueSizePerGroup, new LogsComparator()); // make sure that we do not use an empty queue when setting up a ReplicationSource, otherwise // the shipper may quit immediately queue.put(wal); - queues.put(logPrefix, queue); + queues.put(walPrefix, queue); if (this.isSourceActive() && this.walEntryFilter != null) { // new wal group observed after source startup, start a new worker thread to track it // notice: it's possible that wal enqueued when this.running is set but worker thread // still not launched, so it's necessary to check workerThreads before start the worker - tryStartNewShipper(logPrefix, queue); + tryStartNewShipper(walPrefix, queue); } } else { queue.put(wal); } if (LOG.isTraceEnabled()) { - LOG.trace("{} Added wal {} to queue of source {}.", logPeerId(), logPrefix, + LOG.trace("{} Added wal {} to queue of source {}.", logPeerId(), walPrefix, this.replicationQueueInfo.getQueueId()); } this.metrics.incrSizeOfLogQueue(); // This will wal a warning for each new wal that gets created above the warn threshold int queueSize = queue.size(); if (queueSize > this.logQueueWarnThreshold) { - LOG.warn("{} WAL group {} queue size: {} exceeds value of " - + "replication.source.log.queue.warn: {}", logPeerId(), - logPrefix, queueSize, logQueueWarnThreshold); + LOG.warn("{} WAL group {} queue size: {} exceeds value of " + + "replication.source.log.queue.warn {}", logPeerId(), walPrefix, queueSize, + logQueueWarnThreshold); } } @@ -372,16 +372,10 @@ private void initializeWALEntryFilter(UUID peerClusterId) { private void tryStartNewShipper(String walGroupId, PriorityBlockingQueue queue) { workerThreads.compute(walGroupId, (key, value) -> { if (value != null) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "{} Someone has beat us to start a worker thread for wal group {}", - logPeerId(), key); - } + 
LOG.debug("{} preempted start of worker walGroupId={}", logPeerId(), walGroupId); return value; } else { - if (LOG.isDebugEnabled()) { - LOG.debug("{} Starting up worker for wal group {}", logPeerId(), key); - } + LOG.debug("{} starting worker for walGroupId={}", logPeerId(), walGroupId); ReplicationSourceShipper worker = createNewShipper(walGroupId, queue); ReplicationSourceWALReader walReader = createNewWALReader(walGroupId, queue, worker.getStartPosition()); @@ -457,8 +451,7 @@ private ReplicationSourceWALReader createNewWALReader(String walGroupId, /** * Call after {@link #initializeWALEntryFilter(UUID)} else it will be null. - * @return The WAL Entry Filter Chain this ReplicationSource will use on WAL files filtering - * out WALEntry edits. + * @return WAL Entry Filter Chain to use on WAL files filtering *out* WALEntry edits. */ @VisibleForTesting WALEntryFilter getWalEntryFilter() { @@ -610,7 +603,7 @@ private void initialize() { this.startupOngoing.set(false); throw new IllegalStateException("Source should be active."); } - LOG.info("{} Source: {}, is now replicating from cluster: {}; to peer cluster: {};", + LOG.info("{} queueId={} is replicating from cluster={} to cluster={}", logPeerId(), this.replicationQueueInfo.getQueueId(), clusterId, peerClusterId); initializeWALEntryFilter(peerClusterId); @@ -625,10 +618,13 @@ private void initialize() { @Override public void startup() { + if (this.sourceRunning) { + return; + } + this.sourceRunning = true; //Flag that signalizes uncaught error happening while starting up the source // and a retry should be attempted MutableBoolean retryStartup = new MutableBoolean(true); - this.sourceRunning = true; do { if(retryStartup.booleanValue()) { retryStartup.setValue(false); @@ -661,7 +657,8 @@ public void terminate(String reason, Exception cause, boolean clearMetrics) { terminate(reason, cause, clearMetrics, true); } - public void terminate(String reason, Exception cause, boolean clearMetrics, boolean join) { + public void terminate(String reason, Exception cause, boolean clearMetrics, + boolean join) { if (cause == null) { LOG.info("{} Closing source {} because: {}", logPeerId(), this.queueId, reason); } else { @@ -855,6 +852,6 @@ void removeWorker(ReplicationSourceShipper worker) { } private String logPeerId(){ - return "[Source for peer " + this.getPeer().getId() + "]:"; + return "peerId=" + this.getPeerId() + ","; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 0454e817b0e3..a95ee13c7379 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -174,7 +174,7 @@ private void setCurrentPath(Path path) { private void tryAdvanceEntry() throws IOException { if (checkReader()) { boolean beingWritten = readNextEntryAndRecordReaderPosition(); - LOG.trace("reading wal file {}. Current open for write: {}", this.currentPath, beingWritten); + LOG.trace("Reading WAL {}; currently open for write={}", this.currentPath, beingWritten); if (currentEntry == null && !beingWritten) { // no more entries in this log file, and the file is already closed, i.e, rolled // Before dequeueing, we should always get one more attempt at reading. 
@@ -222,7 +222,7 @@ private boolean checkAllBytesParsed() throws IOException { if (currentPositionOfReader < stat.getLen()) { final long skippedBytes = stat.getLen() - currentPositionOfReader; LOG.debug( - "Reached the end of WAL file '{}'. It was not closed cleanly," + + "Reached the end of WAL {}. It was not closed cleanly," + " so we did not parse {} bytes of data. This is normally ok.", currentPath, skippedBytes); metrics.incrUncleanlyClosedWALs(); @@ -230,7 +230,7 @@ private boolean checkAllBytesParsed() throws IOException { } } else if (currentPositionOfReader + trailerSize < stat.getLen()) { LOG.warn( - "Processing end of WAL file '{}'. At position {}, which is too far away from" + + "Processing end of WAL {} at position {}, which is too far away from" + " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}", currentPath, currentPositionOfReader, stat.getLen(), getCurrentPathStat()); setPosition(0); @@ -241,7 +241,7 @@ private boolean checkAllBytesParsed() throws IOException { } } if (LOG.isTraceEnabled()) { - LOG.trace("Reached the end of log " + this.currentPath + ", and the length of the file is " + + LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + (stat == null ? "N/A" : stat.getLen())); } metrics.incrCompletedWAL(); @@ -249,7 +249,7 @@ private boolean checkAllBytesParsed() throws IOException { } private void dequeueCurrentLog() throws IOException { - LOG.debug("Reached the end of log {}", currentPath); + LOG.debug("EOF, closing {}", currentPath); closeReader(); logQueue.remove(); setPosition(0); @@ -264,7 +264,7 @@ private boolean readNextEntryAndRecordReaderPosition() throws IOException { long readerPos = reader.getPosition(); OptionalLong fileLength = walFileLengthProvider.getLogFileSizeIfBeingWritten(currentPath); if (fileLength.isPresent() && readerPos > fileLength.getAsLong()) { - // see HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted + // See HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted // data, so we need to make sure that we do not read beyond the committed file length. if (LOG.isDebugEnabled()) { LOG.debug("The provider tells us the valid length for " + currentPath + " is " + diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index bb02af3788aa..557ba77c5236 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -221,7 +221,7 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, i LOG.warn("Tried to set null ServerName in hbase:meta; skipping -- ServerName required"); return; } - LOG.info("Setting hbase:meta (replicaId={}) location in ZooKeeper as {}, state={}", replicaId, + LOG.info("Setting hbase:meta replicaId={} location in ZooKeeper as {}, state={}", replicaId, serverName, state); // Make the MetaRegionServer pb and then get its bytes and save this as // the znode content. 
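The catch block in the next hunk is the usual ZooKeeper set-or-create sequence. Stripped down to the bare client API (a sketch, not the MetaTableLocator code itself), the pattern is:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Try the common case first (node already exists); create on NoNode.
    static void setOrCreate(ZooKeeper zk, String path, byte[] data)
        throws KeeperException, InterruptedException {
      try {
        zk.setData(path, data, -1); // version -1 matches any version
      } catch (KeeperException.NoNodeException nne) {
        zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
      }
    }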
@@ -235,9 +235,9 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, i zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data); } catch(KeeperException.NoNodeException nne) { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { - LOG.debug("META region location doesn't exist, create it"); + LOG.debug("hbase:meta region location doesn't exist, create it"); } else { - LOG.debug("META region location doesn't exist for replicaId=" + replicaId + + LOG.debug("hbase:meta region location doesn't exist for replicaId=" + replicaId + ", create it"); } ZKUtil.createAndWatch(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), From 17ebf917ba354e4632b726323b2b32af3aa6c8de Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 18 Sep 2020 17:29:23 -0700 Subject: [PATCH 063/769] HBASE-25068 Pass WALFactory to Replication so it knows of all WALProviders, not just default/user-space Pass WALFactory to Replication instead of WALProvider. WALFactory has all WALProviders in it, not just the user-space WALProvider. Do this so ReplicationService has access to all WALProviders in the Server (To be exploited by the follow-on patch in HBASE-25055) --- .../hadoop/hbase/regionserver/HRegionServer.java | 15 +++++++-------- .../hbase/regionserver/ReplicationService.java | 11 ++++------- .../replication/regionserver/Replication.java | 8 ++++---- .../regionserver/ReplicationSyncUp.java | 6 ++++-- .../hbase/replication/TestReplicationBase.java | 2 +- .../TestReplicationSourceManager.java | 3 ++- 6 files changed, 22 insertions(+), 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index f14da2f6a17e..8abede5b272a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1911,8 +1911,7 @@ private void setupWALAndReplication() throws IOException { throw new IOException("Can not create wal directory " + logDir); } // Instantiate replication if replication enabled. Pass it the log directories. - createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, - factory.getWALProvider()); + createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory); } this.walFactory = factory; } @@ -3063,7 +3062,7 @@ public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() { * Load the replication executorService objects, if any */ private static void createNewReplicationInstance(Configuration conf, HRegionServer server, - FileSystem walFs, Path walDir, Path oldWALDir, WALProvider walProvider) throws IOException { + FileSystem walFs, Path walDir, Path oldWALDir, WALFactory walFactory) throws IOException { // read in the name of the source replication class from the config file. String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME, HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT); @@ -3076,19 +3075,19 @@ private static void createNewReplicationInstance(Configuration conf, HRegionServ // only one object. 
if (sourceClassname.equals(sinkClassname)) { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); server.replicationSinkHandler = (ReplicationSinkService) server.replicationSourceHandler; } else { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); server.replicationSinkHandler = newReplicationInstance(sinkClassname, - ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walFactory); } } private static T newReplicationInstance(String classname, Class xface, Configuration conf, HRegionServer server, FileSystem walFs, Path logDir, - Path oldLogDir, WALProvider walProvider) throws IOException { + Path oldLogDir, WALFactory walFactory) throws IOException { final Class clazz; try { ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); @@ -3097,7 +3096,7 @@ private static T newReplicationInstance(String cl throw new IOException("Could not find class for " + classname); } T service = ReflectionUtils.newInstance(clazz, conf); - service.initialize(server, walFs, logDir, oldLogDir, walProvider); + service.initialize(server, walFs, logDir, oldLogDir, walFactory); return service; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index e9bbaea8ae46..33b3321755fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; -import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.yetus.audience.InterfaceAudience; /** @@ -32,14 +32,11 @@ */ @InterfaceAudience.Private public interface ReplicationService { - /** * Initializes the replication service object. - * @param walProvider can be null if not initialized inside a live region server environment, for - * example, {@code ReplicationSyncUp}. */ - void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALProvider walProvider) - throws IOException; + void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALFactory walFactory) + throws IOException; /** * Start replication services. 
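With the interface above now taking a WALFactory, an implementation holds the factory and derives the user-space provider only where it is needed. A sketch of the new shape (the class name is hypothetical; the calls mirror the Replication.java change below, and the remaining interface methods are elided):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.Server;
    import org.apache.hadoop.hbase.wal.WALFactory;
    import org.apache.hadoop.hbase.wal.WALProvider;

    public class SketchReplicationService implements ReplicationService {
      private WALProvider walProvider;

      @Override
      public void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir,
          WALFactory walFactory) throws IOException {
        // The factory keeps every WALProvider in the server reachable (the
        // point of HBASE-25055); the default provider is derived on demand.
        this.walProvider = walFactory.getWALProvider();
      }
      // startReplicationService(), stopReplicationService(), etc. elided.
    }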
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 195877bf5f3c..d8a696c7172e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.yetus.audience.InterfaceAudience; @@ -89,7 +90,7 @@ public Replication() { @Override public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir, - WALProvider walProvider) throws IOException { + WALFactory walFactory) throws IOException { this.server = server; this.conf = this.server.getConfiguration(); this.isReplicationForBulkLoadDataEnabled = @@ -128,6 +129,7 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager(); this.globalMetricsSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); + WALProvider walProvider = walFactory.getWALProvider(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId, walProvider != null ? walProvider.getWALFileLengthProvider() : p -> OptionalLong.empty(), @@ -198,7 +200,6 @@ public void join() { * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace * directory required for replicating hfiles * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory - * @throws IOException */ @Override public void replicateLogEntries(List entries, CellScanner cells, @@ -211,7 +212,6 @@ public void replicateLogEntries(List entries, CellScanner cells, /** * If replication is enabled and this cluster is a master, * it starts - * @throws IOException */ @Override public void startReplicationService() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 98490f137dbe..b04c7eb75f02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -82,7 +83,8 @@ public boolean isAborted() { System.out.println("Start Replication Server start"); Replication replication = new Replication(); - replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, null); + replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, + new WALFactory(conf, "test", false)); ReplicationSourceManager manager = replication.getReplicationManager(); manager.init().get(); while (manager.activeFailoverTaskCount() > 0) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 6e1692a9a2bd..455b27298156 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 8e38114fa0a5..4abb00fee03c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -194,7 +194,8 @@ protected static void setupZkAndReplication() throws Exception { logDir = utility.getDataTestDir(HConstants.HREGION_LOGDIR_NAME); remoteLogDir = utility.getDataTestDir(ReplicationUtils.REMOTE_WAL_DIR_NAME); replication = new Replication(); - replication.initialize(new DummyServer(), fs, logDir, oldLogDir, null); + replication.initialize(new DummyServer(), fs, logDir, oldLogDir, + new WALFactory(conf, "test", false)); managerOfCluster = getManagerFromCluster(); if (managerOfCluster != null) { // After replication procedure, we need to add peer by hand (other than by receiving From 2c5055f81ae487f9d765fbbb8164b0c2a210b8ad Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Tue, 22 Sep 2020 12:09:02 -0700 Subject: [PATCH 064/769] HBASE-25068 Pass WALFactory to Replication so it knows of all WALProviders, not just default/user-space (#2434) Pass WALFactory to Replication instead of WALProvider. WALFactory has all WALProviders in it, not just the user-space WALProvider. 
Do this so ReplicationService has access to all WALProviders in the Server (To be exploited by the follow-on patch in HBASE-25055) Signed-off-by: Duo Zhang From 7e910a573f30a9995cb779fa55a6911629ac2e5f Mon Sep 17 00:00:00 2001 From: Guanghao Zhang Date: Wed, 23 Sep 2020 08:30:43 +0800 Subject: [PATCH 065/769] HBASE-25074 Refactor ReplicationSinkManager: reduce code and make it easy to understand (#2430) Signed-off-by: Wellington Chevreuil Signed-off-by: Duo Zhang --- .../replication/HBaseReplicationEndpoint.java | 215 +++++++++++++----- .../HBaseInterClusterReplicationEndpoint.java | 51 +---- .../regionserver/ReplicationSinkManager.java | 193 ---------------- .../TestHBaseReplicationEndpoint.java | 210 +++++++++++++++++ .../TestReplicationSinkManager.java | 210 ----------------- .../TestSerialReplicationEndpoint.java | 10 +- 6 files changed, 382 insertions(+), 507 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 3cde0d5113a0..850a79125562 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -22,8 +22,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.AsyncClusterConnection; +import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; +import org.apache.hadoop.hbase.client.ClusterConnectionFactory; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; @@ -38,6 +46,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + /** * A {@link BaseReplicationEndpoint} for replication endpoints whose * target cluster is an HBase cluster. @@ -50,8 +61,58 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private ZKWatcher zkw = null; - private List regionServers = new ArrayList<>(0); - private long lastRegionServerUpdate; + protected Configuration conf; + + protected AsyncClusterConnection conn; + + /** + * Default maximum number of times a replication sink can be reported as bad before + * it will no longer be provided as a sink for replication without the pool of + * replication sinks being refreshed. + */ + public static final int DEFAULT_BAD_SINK_THRESHOLD = 3; + + /** + * Default ratio of the total number of peer cluster region servers to consider + * replicating to. 
+ */ + public static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; + + // Ratio of total number of potential peer region servers to be used + private float ratio; + + // Maximum number of times a sink can be reported as bad before the pool of + // replication sinks is refreshed + private int badSinkThreshold; + // Count of "bad replication sink" reports per peer sink + private Map badReportCounts; + + private List sinkServers = new ArrayList<>(0); + + /* + * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different + * Connection implementations, or initialize it in a different way, so defining createConnection + * as protected for possible overridings. + */ + protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { + return ClusterConnectionFactory.createAsyncClusterConnection(conf, + null, User.getCurrent()); + } + + @Override + public void init(Context context) throws IOException { + super.init(context); + this.conf = HBaseConfiguration.create(ctx.getConfiguration()); + // TODO: This connection is replication specific or we should make it particular to + // replication and make replication specific settings such as compression or codec to use + // passing Cells. + this.conn = createConnection(this.conf); + this.ratio = + ctx.getConfiguration().getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); + this.badSinkThreshold = + ctx.getConfiguration().getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD); + this.badReportCounts = Maps.newHashMap(); + } protected synchronized void disconnect() { if (zkw != null) { @@ -63,7 +124,7 @@ protected synchronized void disconnect() { * A private method used to re-establish a zookeeper session with a peer cluster. 
* @param ke */ - protected void reconnect(KeeperException ke) { + private void reconnect(KeeperException ke) { if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException || ke instanceof AuthFailedException) { String clusterKey = ctx.getPeerConfig().getClusterKey(); @@ -117,23 +178,17 @@ public synchronized UUID getPeerUUID() { return peerUUID; } - /** - * Get the ZK connection to this peer - * @return zk connection - */ - protected synchronized ZKWatcher getZkw() { - return zkw; - } - /** * Closes the current ZKW (if not null) and creates a new one * @throws IOException If anything goes wrong connecting */ - synchronized void reloadZkWatcher() throws IOException { - if (zkw != null) zkw.close(); + private synchronized void reloadZkWatcher() throws IOException { + if (zkw != null) { + zkw.close(); + } zkw = new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this); - getZkw().registerListener(new PeerRegionServerListener(this)); + zkw.registerListener(new PeerRegionServerListener(this)); } @Override @@ -150,13 +205,19 @@ public boolean isAborted() { /** * Get the list of all the region servers from the specified peer - * @param zkw zk connection to use + * * @return list of region server addresses or an empty list if the slave is unavailable */ - protected static List fetchSlavesAddresses(ZKWatcher zkw) - throws KeeperException { - List children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, - zkw.getZNodePaths().rsZNode); + protected List fetchSlavesAddresses() { + List children = null; + try { + children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); + } catch (KeeperException ke) { + if (LOG.isDebugEnabled()) { + LOG.debug("Fetch slaves addresses failed", ke); + } + reconnect(ke); + } if (children == null) { return Collections.emptyList(); } @@ -167,43 +228,70 @@ protected static List fetchSlavesAddresses(ZKWatcher zkw) return addresses; } + protected synchronized void chooseSinks() { + List slaveAddresses = fetchSlavesAddresses(); + if (slaveAddresses.isEmpty()) { + LOG.warn("No sinks available at peer. Will not be able to replicate"); + } + Collections.shuffle(slaveAddresses, ThreadLocalRandom.current()); + int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); + this.sinkServers = slaveAddresses.subList(0, numSinks); + badReportCounts.clear(); + } + + protected synchronized int getNumSinks() { + return sinkServers.size(); + } + /** - * Get a list of all the addresses of all the available region servers - * for this peer cluster, or an empty list if no region servers available at peer cluster. - * @return list of addresses + * Get a randomly-chosen replication sink to replicate to. + * @return a replication sink to replicate to */ - // Synchronize peer cluster connection attempts to avoid races and rate - // limit connections when multiple replication sources try to connect to - // the peer cluster. If the peer cluster is down we can get out of control - // over time. 
- public synchronized List getRegionServers() { - try { - setRegionServers(fetchSlavesAddresses(this.getZkw())); - } catch (KeeperException ke) { - if (LOG.isDebugEnabled()) { - LOG.debug("Fetch slaves addresses failed", ke); - } - reconnect(ke); + protected synchronized SinkPeer getReplicationSink() throws IOException { + if (sinkServers.isEmpty()) { + LOG.info("Current list of sinks is out of date or empty, updating"); + chooseSinks(); } - return regionServers; + if (sinkServers.isEmpty()) { + throw new IOException("No replication sinks are available"); + } + ServerName serverName = + sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size())); + return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName)); } /** - * Set the list of region servers for that peer - * @param regionServers list of addresses for the region servers + * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it + * failed). If a single SinkPeer is reported as bad more than + * replication.bad.sink.threshold times, it will be removed + * from the pool of potential replication targets. + * + * @param sinkPeer The SinkPeer that had a failed replication attempt on it */ - public synchronized void setRegionServers(List regionServers) { - this.regionServers = regionServers; - lastRegionServerUpdate = System.currentTimeMillis(); + protected synchronized void reportBadSink(SinkPeer sinkPeer) { + ServerName serverName = sinkPeer.getServerName(); + int badReportCount = badReportCounts.compute(serverName, (k, v) -> v == null ? 1 : v + 1); + if (badReportCount > badSinkThreshold) { + this.sinkServers.remove(serverName); + if (sinkServers.isEmpty()) { + chooseSinks(); + } + } } /** - * Get the timestamp at which the last change occurred to the list of region servers to replicate - * to. - * @return The System.currentTimeMillis at the last time the list of peer region servers changed. + * Report that a {@code SinkPeer} successfully replicated a chunk of data. 
+ * + * @param sinkPeer + * The SinkPeer that had a failed replication attempt on it */ - public long getLastRegionServerUpdate() { - return lastRegionServerUpdate; + protected synchronized void reportSinkSuccess(SinkPeer sinkPeer) { + badReportCounts.remove(sinkPeer.getServerName()); + } + + @VisibleForTesting + List getSinkServers() { + return sinkServers; } /** @@ -214,22 +302,39 @@ public static class PeerRegionServerListener extends ZKListener { private final HBaseReplicationEndpoint replicationEndpoint; private final String regionServerListNode; - public PeerRegionServerListener(HBaseReplicationEndpoint replicationPeer) { - super(replicationPeer.getZkw()); - this.replicationEndpoint = replicationPeer; - this.regionServerListNode = replicationEndpoint.getZkw().getZNodePaths().rsZNode; + public PeerRegionServerListener(HBaseReplicationEndpoint endpoint) { + super(endpoint.zkw); + this.replicationEndpoint = endpoint; + this.regionServerListNode = endpoint.zkw.getZNodePaths().rsZNode; } @Override public synchronized void nodeChildrenChanged(String path) { if (path.equals(regionServerListNode)) { - try { - LOG.info("Detected change to peer region servers, fetching updated list"); - replicationEndpoint.setRegionServers(fetchSlavesAddresses(replicationEndpoint.getZkw())); - } catch (KeeperException e) { - LOG.error("Error reading slave addresses", e); - } + LOG.info("Detected change to peer region servers, fetching updated list"); + replicationEndpoint.chooseSinks(); } } } + + /** + * Wraps a replication region server sink to provide the ability to identify it. + */ + public static class SinkPeer { + private ServerName serverName; + private AsyncRegionServerAdmin regionServer; + + public SinkPeer(ServerName serverName, AsyncRegionServerAdmin regionServer) { + this.serverName = serverName; + this.regionServer = regionServer; + } + + ServerName getServerName() { + return serverName; + } + + public AsyncRegionServerAdmin getRegionServer() { + return regionServer; + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 4e0669c2e9fd..b6e1f69173fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -41,7 +41,6 @@ import java.util.stream.Stream; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CellUtil; @@ -60,7 +59,6 @@ import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -100,8 +98,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi public static final String REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY = "hbase.replication.drop.on.deleted.columnfamily"; - private AsyncClusterConnection conn; - private Configuration conf; // How long 
should we sleep for each retry private long sleepForRetries; // Maximum number of retries before taking bold actions @@ -114,8 +110,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private int replicationRpcLimit; //Metrics for this source private MetricsSource metrics; - // Handles connecting to peer region servers - private ReplicationSinkManager replicationSinkMgr; private boolean peersSelected = false; private String replicationClusterId = ""; private ThreadPoolExecutor exec; @@ -130,25 +124,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi //Initialising as 0 to guarantee at least one logging message private long lastSinkFetchTime = 0; - /* - * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different - * Connection implementations, or initialize it in a different way, so defining createConnection - * as protected for possible overridings. - */ - protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { - return ClusterConnectionFactory.createAsyncClusterConnection(conf, - null, User.getCurrent()); - } - - /* - * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different - * ReplicationSinkManager implementations, or initialize it in a different way, - * so defining createReplicationSinkManager as protected for possible overridings. - */ - protected ReplicationSinkManager createReplicationSinkManager(AsyncClusterConnection conn) { - return new ReplicationSinkManager(conn, this, this.conf); - } - @Override public void init(Context context) throws IOException { super.init(context); @@ -171,8 +146,6 @@ public void init(Context context) throws IOException { this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); - // ReplicationQueueInfo parses the peerId out of the znode for us - this.replicationSinkMgr = createReplicationSinkManager(conn); // per sink thread pool this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); @@ -211,14 +184,11 @@ private void decorateConf() { } private void connectToPeers() { - getRegionServers(); - int sleepMultiplier = 1; - // Connect to peer cluster first, unless we have to stop - while (this.isRunning() && replicationSinkMgr.getNumSinks() == 0) { - replicationSinkMgr.chooseSinks(); - if (this.isRunning() && replicationSinkMgr.getNumSinks() == 0) { + while (this.isRunning() && getNumSinks() == 0) { + chooseSinks(); + if (this.isRunning() && getNumSinks() == 0) { if (sleepForRetries("Waiting for peers", sleepMultiplier)) { sleepMultiplier++; } @@ -253,7 +223,7 @@ private int getEstimatedEntrySize(Entry e) { } private List> createParallelBatches(final List entries) { - int numSinks = Math.max(replicationSinkMgr.getNumSinks(), 1); + int numSinks = Math.max(getNumSinks(), 1); int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1), numSinks); List> entryLists = Stream.generate(ArrayList::new).limit(n).collect(Collectors.toList()); @@ -513,7 +483,7 @@ public boolean replicate(ReplicateContext replicateContext) { peersSelected = true; } - int numSinks = replicationSinkMgr.getNumSinks(); + int numSinks = getNumSinks(); if (numSinks == 0) { if((System.currentTimeMillis() - lastSinkFetchTime) >= (maxRetriesMultiplier*1000)) { LOG.warn( @@ -561,7 +531,7 @@ public boolean replicate(ReplicateContext replicateContext) { } else { LOG.warn("{} 
Peer encountered RemoteException, rechecking all sinks: ", logPeerId(), ioe); - replicationSinkMgr.chooseSinks(); + chooseSinks(); } } else { if (ioe instanceof SocketTimeoutException) { @@ -574,7 +544,7 @@ public boolean replicate(ReplicateContext replicateContext) { this.socketTimeoutMultiplier); } else if (ioe instanceof ConnectException || ioe instanceof UnknownHostException) { LOG.warn("{} Peer is unavailable, rechecking all sinks: ", logPeerId(), ioe); - replicationSinkMgr.chooseSinks(); + chooseSinks(); } else { LOG.warn("{} Can't replicate because of a local or network error: ", logPeerId(), ioe); } @@ -629,7 +599,7 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) LOG.trace("{} Replicating batch {} of {} entries with total size {} bytes to {}", logPeerId(), entriesHashCode, entries.size(), size, replicationClusterId); } - sinkPeer = replicationSinkMgr.getReplicationSink(); + sinkPeer = getReplicationSink(); AsyncRegionServerAdmin rsAdmin = sinkPeer.getRegionServer(); try { ReplicationProtobufUtil.replicateWALEntry(rsAdmin, @@ -644,10 +614,10 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) } throw e; } - replicationSinkMgr.reportSinkSuccess(sinkPeer); + reportSinkSuccess(sinkPeer); } catch (IOException ioe) { if (sinkPeer != null) { - replicationSinkMgr.reportBadSink(sinkPeer); + reportBadSink(sinkPeer); } throw ioe; } @@ -683,5 +653,4 @@ protected Callable createReplicator(List entries, int batchIndex private String logPeerId(){ return "[Source for peer " + this.ctx.getPeerId() + "]:"; } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java deleted file mode 100644 index db12dc0a6fdf..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package org.apache.hadoop.hbase.replication.regionserver;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ThreadLocalRandom;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
-import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-
-/**
- * Maintains a collection of peers to replicate to, and randomly selects a
- * single peer to replicate to per set of data to replicate. Also handles
- * keeping track of peer availability.
- */
-@InterfaceAudience.Private
-public class ReplicationSinkManager {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ReplicationSinkManager.class);
-
-  /**
-   * Default maximum number of times a replication sink can be reported as bad before
-   * it will no longer be provided as a sink for replication without the pool of
-   * replication sinks being refreshed.
-   */
-  static final int DEFAULT_BAD_SINK_THRESHOLD = 3;
-
-  /**
-   * Default ratio of the total number of peer cluster region servers to consider
-   * replicating to.
-   */
-  static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f;
-
-
-  private final AsyncClusterConnection conn;
-
-  private final HBaseReplicationEndpoint endpoint;
-
-  // Count of "bad replication sink" reports per peer sink
-  private final Map<ServerName, Integer> badReportCounts;
-
-  // Ratio of total number of potential peer region servers to be used
-  private final float ratio;
-
-  // Maximum number of times a sink can be reported as bad before the pool of
-  // replication sinks is refreshed
-  private final int badSinkThreshold;
-
-  // A timestamp of the last time the list of replication peers changed
-  private long lastUpdateToPeers;
-
-  // The current pool of sinks to which replication can be performed
-  private List<ServerName> sinks = Lists.newArrayList();
-
-  /**
-   * Instantiate for a single replication peer cluster.
-   * @param conn connection to the peer cluster
-   * @param endpoint replication endpoint for inter cluster replication
-   * @param conf HBase configuration, used for determining replication source ratio and bad peer
-   *          threshold
-   */
-  public ReplicationSinkManager(AsyncClusterConnection conn, HBaseReplicationEndpoint endpoint,
-      Configuration conf) {
-    this.conn = conn;
-    this.endpoint = endpoint;
-    this.badReportCounts = Maps.newHashMap();
-    this.ratio = conf.getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO);
-    this.badSinkThreshold =
-        conf.getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD);
-  }
-
-  /**
-   * Get a randomly-chosen replication sink to replicate to.
-   * @return a replication sink to replicate to
-   */
-  public synchronized SinkPeer getReplicationSink() throws IOException {
-    if (endpoint.getLastRegionServerUpdate() > this.lastUpdateToPeers || sinks.isEmpty()) {
-      LOG.info("Current list of sinks is out of date or empty, updating");
-      chooseSinks();
-    }
-
-    if (sinks.isEmpty()) {
-      throw new IOException("No replication sinks are available");
-    }
-    ServerName serverName = sinks.get(ThreadLocalRandom.current().nextInt(sinks.size()));
-    return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName));
-  }
-
-  /**
-   * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it
-   * failed). If a single SinkPeer is reported as bad more than
-   * replication.bad.sink.threshold times, it will be removed
-   * from the pool of potential replication targets.
-   *
-   * @param sinkPeer
-   *          The SinkPeer that had a failed replication attempt on it
-   */
-  public synchronized void reportBadSink(SinkPeer sinkPeer) {
-    ServerName serverName = sinkPeer.getServerName();
-    int badReportCount = (badReportCounts.containsKey(serverName)
-        ? badReportCounts.get(serverName) : 0) + 1;
-    badReportCounts.put(serverName, badReportCount);
-    if (badReportCount > badSinkThreshold) {
-      this.sinks.remove(serverName);
-      if (sinks.isEmpty()) {
-        chooseSinks();
-      }
-    }
-  }
-
-  /**
-   * Report that a {@code SinkPeer} successfully replicated a chunk of data.
-   *
-   * @param sinkPeer
-   *          The SinkPeer that had a failed replication attempt on it
-   */
-  public synchronized void reportSinkSuccess(SinkPeer sinkPeer) {
-    badReportCounts.remove(sinkPeer.getServerName());
-  }
-
-  /**
-   * Refresh the list of sinks.
-   */
-  public synchronized void chooseSinks() {
-    List<ServerName> slaveAddresses = endpoint.getRegionServers();
-    if(slaveAddresses.isEmpty()){
-      LOG.warn("No sinks available at peer. Will not be able to replicate");
-    }
-    Collections.shuffle(slaveAddresses, ThreadLocalRandom.current());
-    int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio);
-    sinks = slaveAddresses.subList(0, numSinks);
-    lastUpdateToPeers = System.currentTimeMillis();
-    badReportCounts.clear();
-  }
-
-  public synchronized int getNumSinks() {
-    return sinks.size();
-  }
-
-  @VisibleForTesting
-  protected List<ServerName> getSinksForTesting() {
-    return Collections.unmodifiableList(sinks);
-  }
-
-  /**
-   * Wraps a replication region server sink to provide the ability to identify
-   * it.
-   */
-  public static class SinkPeer {
-    private ServerName serverName;
-    private AsyncRegionServerAdmin regionServer;
-
-    public SinkPeer(ServerName serverName, AsyncRegionServerAdmin regionServer) {
-      this.serverName = serverName;
-      this.regionServer = regionServer;
-    }
-
-    ServerName getServerName() {
-      return serverName;
-    }
-
-    public AsyncRegionServerAdmin getRegionServer() {
-      return regionServer;
-    }
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java
new file mode 100644
index 000000000000..41601417a9d4
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
+import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.SinkPeer;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category({ReplicationTests.class, SmallTests.class})
+public class TestHBaseReplicationEndpoint {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestHBaseReplicationEndpoint.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestHBaseReplicationEndpoint.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private HBaseReplicationEndpoint endpoint;
+
+  @Before
+  public void setUp() throws Exception {
+    try {
+      ReplicationEndpoint.Context context =
+          new ReplicationEndpoint.Context(null, UTIL.getConfiguration(), UTIL.getConfiguration(),
+              null, null, null, null, null, null, null);
+      endpoint = new DummyHBaseReplicationEndpoint();
+      endpoint.init(context);
+    } catch (Exception e) {
+      LOG.error("Failed to initialize the endpoint", e);
+    }
+  }
+
+  @Test
+  public void testChooseSinks() {
+    List<ServerName> serverNames = Lists.newArrayList();
+    int totalServers = 20;
+    for (int i = 0; i < totalServers; i++) {
+      serverNames.add(mock(ServerName.class));
+    }
+    ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames);
+    endpoint.chooseSinks();
+    int expected =
+        (int) (totalServers * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO);
+    assertEquals(expected, endpoint.getNumSinks());
+  }
+
+  @Test
+  public void testChooseSinksLessThanRatioAvailable() {
+    List<ServerName> serverNames = Lists.newArrayList(mock(ServerName.class),
+        mock(ServerName.class));
+    ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames);
+    endpoint.chooseSinks();
+    assertEquals(1, endpoint.getNumSinks());
+  }
+
+  @Test
+  public void testReportBadSink() {
+    ServerName serverNameA = mock(ServerName.class);
+    ServerName serverNameB = mock(ServerName.class);
+    ((DummyHBaseReplicationEndpoint) endpoint)
+        .setRegionServers(Lists.newArrayList(serverNameA, serverNameB));
+    endpoint.chooseSinks();
+    // Sanity check
+    assertEquals(1, endpoint.getNumSinks());
+
+    SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class));
+    endpoint.reportBadSink(sinkPeer);
+    // Just reporting a bad sink once shouldn't have an effect
+    assertEquals(1, endpoint.getNumSinks());
+  }
+
+  /**
+   * Once a SinkPeer has been reported as bad more than BAD_SINK_THRESHOLD times, it should not
+   * be replicated to anymore.
+   */
+  @Test
+  public void testReportBadSinkPastThreshold() {
+    List<ServerName> serverNames = Lists.newArrayList();
+    int totalServers = 30;
+    for (int i = 0; i < totalServers; i++) {
+      serverNames.add(mock(ServerName.class));
+    }
+    ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames);
+    endpoint.chooseSinks();
+    // Sanity check
+    int expected =
+        (int) (totalServers * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO);
+    assertEquals(expected, endpoint.getNumSinks());
+
+    ServerName badSinkServer0 = endpoint.getSinkServers().get(0);
+    SinkPeer sinkPeer = new SinkPeer(badSinkServer0, mock(AsyncRegionServerAdmin.class));
+    for (int i = 0; i <= HBaseReplicationEndpoint.DEFAULT_BAD_SINK_THRESHOLD; i++) {
+      endpoint.reportBadSink(sinkPeer);
+    }
+    // Reporting a bad sink more than the threshold count should remove it
+    // from the list of potential sinks
+    assertEquals(expected - 1, endpoint.getNumSinks());
+
+    // now try a sink that has some successes
+    ServerName badSinkServer1 = endpoint.getSinkServers().get(0);
+    sinkPeer = new SinkPeer(badSinkServer1, mock(AsyncRegionServerAdmin.class));
+    for (int i = 0; i < HBaseReplicationEndpoint.DEFAULT_BAD_SINK_THRESHOLD; i++) {
+      endpoint.reportBadSink(sinkPeer);
+    }
+    endpoint.reportSinkSuccess(sinkPeer); // one success
+    endpoint.reportBadSink(sinkPeer);
+    // did not remove the sink, since we had one successful try
+    assertEquals(expected - 1, endpoint.getNumSinks());
+
+    for (int i = 0; i < HBaseReplicationEndpoint.DEFAULT_BAD_SINK_THRESHOLD - 1; i++) {
+      endpoint.reportBadSink(sinkPeer);
+    }
+    // still not removed, since the success reset the counter
+    assertEquals(expected - 1, endpoint.getNumSinks());
+    endpoint.reportBadSink(sinkPeer);
+    // but we exhausted the tries
+    assertEquals(expected - 2, endpoint.getNumSinks());
+  }
+
+  @Test
+  public void testReportBadSinkDownToZeroSinks() {
+    List<ServerName> serverNames = Lists.newArrayList();
+    int totalServers = 4;
+    for (int i = 0; i < totalServers; i++) {
+      serverNames.add(mock(ServerName.class));
+    }
+    ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames);
+    endpoint.chooseSinks();
+    // Sanity check
+    int expected =
+        (int) (totalServers * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO);
+    assertEquals(expected, endpoint.getNumSinks());
+
+    ServerName serverNameA = endpoint.getSinkServers().get(0);
+    ServerName serverNameB = endpoint.getSinkServers().get(1);
+
+    SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class));
+    SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AsyncRegionServerAdmin.class));
+
+    for (int i = 0; i <= HBaseReplicationEndpoint.DEFAULT_BAD_SINK_THRESHOLD; i++) {
+      endpoint.reportBadSink(sinkPeerA);
+      endpoint.reportBadSink(sinkPeerB);
+    }
+
+    // We've gone down to 0 good sinks, so the replication sinks
+    // should have been refreshed now, so out of 4 servers, 2 are not considered as they are
+    // reported as bad.
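+    // Assuming the endpoint still selects ceil(servers * ratio) sinks the way the old
+    // ReplicationSinkManager.chooseSinks() did, the expected count below works out to
+    // (4 - 2) * DEFAULT_REPLICATION_SOURCE_RATIO = 2 * 0.5 = 1 sink.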
+    expected =
+        (int) ((totalServers - 2) * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO);
+    assertEquals(expected, endpoint.getNumSinks());
+  }
+
+  private static class DummyHBaseReplicationEndpoint extends HBaseReplicationEndpoint {
+
+    List<ServerName> regionServers;
+
+    public void setRegionServers(List<ServerName> regionServers) {
+      this.regionServers = regionServers;
+    }
+
+    @Override
+    public List<ServerName> fetchSlavesAddresses() {
+      return regionServers;
+    }
+
+    @Override
+    public boolean replicate(ReplicateContext replicateContext) {
+      return false;
+    }
+
+    @Override
+    public AsyncClusterConnection createConnection(Configuration conf) throws IOException {
+      return null;
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
deleted file mode 100644
index f8a2ab917605..000000000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication.regionserver;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-
-import java.util.List;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
-import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
-import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
-@Category({ReplicationTests.class, SmallTests.class})
-public class TestReplicationSinkManager {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestReplicationSinkManager.class);
-
-  private ReplicationSinkManager sinkManager;
-  private HBaseReplicationEndpoint replicationEndpoint;
-
-  /**
-   * Manage the 'getRegionServers' for the tests below. Override the base class handling
-   * of Regionservers. We used to use a mock for this but updated guava/errorprone disallows
-   * mocking of classes that implement Service.
- */ - private static class SetServersHBaseReplicationEndpoint extends HBaseReplicationEndpoint { - List regionServers; - - @Override - public boolean replicate(ReplicateContext replicateContext) { - return false; - } - - @Override - public synchronized void setRegionServers(List regionServers) { - this.regionServers = regionServers; - } - - @Override - public List getRegionServers() { - return this.regionServers; - } - } - - @Before - public void setUp() { - this.replicationEndpoint = new SetServersHBaseReplicationEndpoint(); - this.sinkManager = new ReplicationSinkManager(mock(AsyncClusterConnection.class), - replicationEndpoint, new Configuration()); - } - - @Test - public void testChooseSinks() { - List serverNames = Lists.newArrayList(); - int totalServers = 20; - for (int i = 0; i < totalServers; i++) { - serverNames.add(mock(ServerName.class)); - } - replicationEndpoint.setRegionServers(serverNames); - sinkManager.chooseSinks(); - int expected = (int) (totalServers * ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO); - assertEquals(expected, sinkManager.getNumSinks()); - - } - - @Test - public void testChooseSinks_LessThanRatioAvailable() { - List serverNames = Lists.newArrayList(mock(ServerName.class), - mock(ServerName.class)); - replicationEndpoint.setRegionServers(serverNames); - sinkManager.chooseSinks(); - assertEquals(1, sinkManager.getNumSinks()); - } - - @Test - public void testReportBadSink() { - ServerName serverNameA = mock(ServerName.class); - ServerName serverNameB = mock(ServerName.class); - replicationEndpoint.setRegionServers(Lists.newArrayList(serverNameA, serverNameB)); - sinkManager.chooseSinks(); - // Sanity check - assertEquals(1, sinkManager.getNumSinks()); - - SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class)); - - sinkManager.reportBadSink(sinkPeer); - - // Just reporting a bad sink once shouldn't have an effect - assertEquals(1, sinkManager.getNumSinks()); - - } - - /** - * Once a SinkPeer has been reported as bad more than BAD_SINK_THRESHOLD times, it should not - * be replicated to anymore. 
- */ - @Test - public void testReportBadSink_PastThreshold() { - List serverNames = Lists.newArrayList(); - int totalServers = 30; - for (int i = 0; i < totalServers; i++) { - serverNames.add(mock(ServerName.class)); - } - replicationEndpoint.setRegionServers(serverNames); - sinkManager.chooseSinks(); - // Sanity check - int expected = (int) (totalServers * ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO); - assertEquals(expected, sinkManager.getNumSinks()); - - ServerName serverName = sinkManager.getSinksForTesting().get(0); - - SinkPeer sinkPeer = new SinkPeer(serverName, mock(AsyncRegionServerAdmin.class)); - - sinkManager.reportSinkSuccess(sinkPeer); // has no effect, counter does not go negative - for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) { - sinkManager.reportBadSink(sinkPeer); - } - - // Reporting a bad sink more than the threshold count should remove it - // from the list of potential sinks - assertEquals(expected - 1, sinkManager.getNumSinks()); - - // - // now try a sink that has some successes - // - serverName = sinkManager.getSinksForTesting().get(0); - - sinkPeer = new SinkPeer(serverName, mock(AsyncRegionServerAdmin.class)); - for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD-1; i++) { - sinkManager.reportBadSink(sinkPeer); - } - sinkManager.reportSinkSuccess(sinkPeer); // one success - sinkManager.reportBadSink(sinkPeer); - - // did not remove the sink, since we had one successful try - assertEquals(expected - 1, sinkManager.getNumSinks()); - - for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD-2; i++) { - sinkManager.reportBadSink(sinkPeer); - } - // still not remove, since the success reset the counter - assertEquals(expected - 1, sinkManager.getNumSinks()); - - sinkManager.reportBadSink(sinkPeer); - // but we exhausted the tries - assertEquals(expected - 2, sinkManager.getNumSinks()); - } - - @Test - public void testReportBadSink_DownToZeroSinks() { - List serverNames = Lists.newArrayList(); - int totalServers = 4; - for (int i = 0; i < totalServers; i++) { - serverNames.add(mock(ServerName.class)); - } - replicationEndpoint.setRegionServers(serverNames); - sinkManager.chooseSinks(); - // Sanity check - List sinkList = sinkManager.getSinksForTesting(); - int expected = (int) (totalServers * ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO); - assertEquals(expected, sinkList.size()); - - ServerName serverNameA = sinkList.get(0); - ServerName serverNameB = sinkList.get(1); - - SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class)); - SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AsyncRegionServerAdmin.class)); - - for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) { - sinkManager.reportBadSink(sinkPeerA); - sinkManager.reportBadSink(sinkPeerB); - } - - // We've gone down to 0 good sinks, so the replication sinks - // should have been refreshed now, so out of 4 servers, 2 are not considered as they are - // reported as bad. 
- expected = (int) ((totalServers - 2) * ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO); - assertEquals(expected, sinkManager.getNumSinks()); - } - -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java index 3c88ab315919..090129174cca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; @@ -175,14 +174,9 @@ protected Callable createReplicator(List entries, int ordinal, i } @Override - public synchronized List getRegionServers() { + public synchronized int getNumSinks() { // Return multiple server names for endpoint parallel replication. - return new ArrayList<>( - ImmutableList.of(ServerName.valueOf("www.example.com", 12016, 1525245876026L), - ServerName.valueOf("www.example2.com", 12016, 1525245876026L), - ServerName.valueOf("www.example3.com", 12016, 1525245876026L), - ServerName.valueOf("www.example4.com", 12016, 1525245876026L), - ServerName.valueOf("www.example4.com", 12016, 1525245876026L))); + return 10; } } } From e7797208d6ca10a10d37b77591e1f0531ed57dfc Mon Sep 17 00:00:00 2001 From: stack Date: Tue, 22 Sep 2020 20:48:31 -0700 Subject: [PATCH 066/769] Revert "HBASE-25068 Pass WALFactory to Replication so it knows of all WALProviders, not just default/user-space" This reverts commit 17ebf917ba354e4632b726323b2b32af3aa6c8de. --- .../hadoop/hbase/regionserver/HRegionServer.java | 15 ++++++++------- .../hbase/regionserver/ReplicationService.java | 11 +++++++---- .../replication/regionserver/Replication.java | 8 ++++---- .../regionserver/ReplicationSyncUp.java | 6 ++---- .../hbase/replication/TestReplicationBase.java | 2 +- .../TestReplicationSourceManager.java | 3 +-- 6 files changed, 23 insertions(+), 22 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8abede5b272a..f14da2f6a17e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1911,7 +1911,8 @@ private void setupWALAndReplication() throws IOException { throw new IOException("Can not create wal directory " + logDir); } // Instantiate replication if replication enabled. Pass it the log directories. 
- createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory); + createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, + factory.getWALProvider()); } this.walFactory = factory; } @@ -3062,7 +3063,7 @@ public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() { * Load the replication executorService objects, if any */ private static void createNewReplicationInstance(Configuration conf, HRegionServer server, - FileSystem walFs, Path walDir, Path oldWALDir, WALFactory walFactory) throws IOException { + FileSystem walFs, Path walDir, Path oldWALDir, WALProvider walProvider) throws IOException { // read in the name of the source replication class from the config file. String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME, HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT); @@ -3075,19 +3076,19 @@ private static void createNewReplicationInstance(Configuration conf, HRegionServ // only one object. if (sourceClassname.equals(sinkClassname)) { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); server.replicationSinkHandler = (ReplicationSinkService) server.replicationSourceHandler; } else { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); server.replicationSinkHandler = newReplicationInstance(sinkClassname, - ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walFactory); + ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walProvider); } } private static T newReplicationInstance(String classname, Class xface, Configuration conf, HRegionServer server, FileSystem walFs, Path logDir, - Path oldLogDir, WALFactory walFactory) throws IOException { + Path oldLogDir, WALProvider walProvider) throws IOException { final Class clazz; try { ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); @@ -3096,7 +3097,7 @@ private static T newReplicationInstance(String cl throw new IOException("Could not find class for " + classname); } T service = ReflectionUtils.newInstance(clazz, conf); - service.initialize(server, walFs, logDir, oldLogDir, walFactory); + service.initialize(server, walFs, logDir, oldLogDir, walProvider); return service; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index 33b3321755fa..e9bbaea8ae46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.yetus.audience.InterfaceAudience; /** @@ -32,11 +32,14 @@ */ @InterfaceAudience.Private public interface ReplicationService { + /** * Initializes the replication service object. + * @param walProvider can be null if not initialized inside a live region server environment, for + * example, {@code ReplicationSyncUp}. */ - void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALFactory walFactory) - throws IOException; + void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALProvider walProvider) + throws IOException; /** * Start replication services. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index d8a696c7172e..195877bf5f3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; -import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.yetus.audience.InterfaceAudience; @@ -90,7 +89,7 @@ public Replication() { @Override public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir, - WALFactory walFactory) throws IOException { + WALProvider walProvider) throws IOException { this.server = server; this.conf = this.server.getConfiguration(); this.isReplicationForBulkLoadDataEnabled = @@ -129,7 +128,6 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager(); this.globalMetricsSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); - WALProvider walProvider = walFactory.getWALProvider(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId, walProvider != null ? 
walProvider.getWALFileLengthProvider() : p -> OptionalLong.empty(), @@ -200,6 +198,7 @@ public void join() { * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace * directory required for replicating hfiles * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory + * @throws IOException */ @Override public void replicateLogEntries(List entries, CellScanner cells, @@ -212,6 +211,7 @@ public void replicateLogEntries(List entries, CellScanner cells, /** * If replication is enabled and this cluster is a master, * it starts + * @throws IOException */ @Override public void startReplicationService() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index b04c7eb75f02..98490f137dbe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -83,8 +82,7 @@ public boolean isAborted() { System.out.println("Start Replication Server start"); Replication replication = new Replication(); - replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, - new WALFactory(conf, "test", false)); + replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, null); ReplicationSourceManager manager = replication.getReplicationManager(); manager.init().get(); while (manager.activeFailoverTaskCount() > 0) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 455b27298156..6e1692a9a2bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 4abb00fee03c..8e38114fa0a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -194,8 +194,7 @@ protected static void setupZkAndReplication() throws Exception { logDir = utility.getDataTestDir(HConstants.HREGION_LOGDIR_NAME); remoteLogDir = utility.getDataTestDir(ReplicationUtils.REMOTE_WAL_DIR_NAME); replication = new Replication(); - replication.initialize(new DummyServer(), fs, logDir, oldLogDir, - new WALFactory(conf, "test", false)); + replication.initialize(new DummyServer(), fs, logDir, oldLogDir, null); managerOfCluster = getManagerFromCluster(); if (managerOfCluster != null) { // After replication procedure, we need to add peer by hand (other than by receiving From 98225ff1a369add92f71e5ee04052fdd8875c2f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E4=BC=9F?= Date: Wed, 23 Sep 2020 14:36:00 +0800 Subject: [PATCH 067/769] Should not use XXXService.Interface.class.getSimpleName as stub key prefix in AsyncConnectionImpl (#2443) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/client/AsyncConnectionImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 97b70e1a7ad8..406af0d4fdd6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -254,7 +254,7 @@ private ClientService.Interface createRegionServerStub(ServerName serverName) th ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { return ConcurrentMapUtils.computeIfAbsentEx(rsStubs, - getStubKey(ClientService.Interface.class.getSimpleName(), serverName, hostnameCanChange), + getStubKey(ClientService.getDescriptor().getName(), serverName, hostnameCanChange), () -> createRegionServerStub(serverName)); } @@ -268,7 +268,7 @@ private AdminService.Interface createAdminServerStub(ServerName serverName) thro AdminService.Interface getAdminStub(ServerName serverName) throws IOException { return ConcurrentMapUtils.computeIfAbsentEx(adminSubs, - getStubKey(AdminService.Interface.class.getSimpleName(), serverName, hostnameCanChange), + getStubKey(AdminService.getDescriptor().getName(), serverName, hostnameCanChange), () -> createAdminServerStub(serverName)); } From 3ed41a0e45ec4cc5c7299ea530f630d0cee50f3c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 23 Sep 2020 14:37:27 +0800 Subject: [PATCH 068/769] Revert "Should not use XXXService.Interface.class.getSimpleName as stub key prefix in AsyncConnectionImpl (#2443)" This reverts commit 98225ff1a369add92f71e5ee04052fdd8875c2f8. 
--- .../org/apache/hadoop/hbase/client/AsyncConnectionImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 406af0d4fdd6..97b70e1a7ad8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -254,7 +254,7 @@ private ClientService.Interface createRegionServerStub(ServerName serverName) th ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { return ConcurrentMapUtils.computeIfAbsentEx(rsStubs, - getStubKey(ClientService.getDescriptor().getName(), serverName, hostnameCanChange), + getStubKey(ClientService.Interface.class.getSimpleName(), serverName, hostnameCanChange), () -> createRegionServerStub(serverName)); } @@ -268,7 +268,7 @@ private AdminService.Interface createAdminServerStub(ServerName serverName) thro AdminService.Interface getAdminStub(ServerName serverName) throws IOException { return ConcurrentMapUtils.computeIfAbsentEx(adminSubs, - getStubKey(AdminService.getDescriptor().getName(), serverName, hostnameCanChange), + getStubKey(AdminService.Interface.class.getSimpleName(), serverName, hostnameCanChange), () -> createAdminServerStub(serverName)); } From dca0b593cf3e325db2b8f8e1b21014583b7eb4d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E4=BC=9F?= Date: Wed, 23 Sep 2020 14:36:00 +0800 Subject: [PATCH 069/769] HBASE-25073 Should not use XXXService.Interface.class.getSimpleName as stub key prefix in AsyncConnectionImpl (#2443) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/client/AsyncConnectionImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 97b70e1a7ad8..406af0d4fdd6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -254,7 +254,7 @@ private ClientService.Interface createRegionServerStub(ServerName serverName) th ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { return ConcurrentMapUtils.computeIfAbsentEx(rsStubs, - getStubKey(ClientService.Interface.class.getSimpleName(), serverName, hostnameCanChange), + getStubKey(ClientService.getDescriptor().getName(), serverName, hostnameCanChange), () -> createRegionServerStub(serverName)); } @@ -268,7 +268,7 @@ private AdminService.Interface createAdminServerStub(ServerName serverName) thro AdminService.Interface getAdminStub(ServerName serverName) throws IOException { return ConcurrentMapUtils.computeIfAbsentEx(adminSubs, - getStubKey(AdminService.Interface.class.getSimpleName(), serverName, hostnameCanChange), + getStubKey(AdminService.getDescriptor().getName(), serverName, hostnameCanChange), () -> createAdminServerStub(serverName)); } From 8bfa2cb2eedcf050b26a28961e1b77dbf3cd8c95 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 23 Sep 2020 14:56:50 -0700 Subject: [PATCH 070/769] HBASE-25079 Upgrade Bootstrap to 3.3.7 (#2442) Signed-off-by: Viraj Jasani --- .../static/css/bootstrap-theme.css | 394 - .../static/css/bootstrap-theme.min.css | 14 +- 
.../hbase-webapps/static/css/bootstrap.css | 6805 ----------------- .../static/css/bootstrap.min.css | 13 +- .../fonts/glyphicons-halflings-regular.eot | Bin 14079 -> 20127 bytes .../fonts/glyphicons-halflings-regular.svg | 480 +- .../fonts/glyphicons-halflings-regular.ttf | Bin 29512 -> 45404 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 16448 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../hbase-webapps/static/js/bootstrap.js | 1999 ----- .../hbase-webapps/static/js/bootstrap.min.js | 13 +- .../static/css/bootstrap-theme.css | 394 - .../static/css/bootstrap-theme.min.css | 14 +- .../hbase-webapps/static/css/bootstrap.css | 6805 ----------------- .../static/css/bootstrap.min.css | 13 +- .../fonts/glyphicons-halflings-regular.eot | Bin 14079 -> 20127 bytes .../fonts/glyphicons-halflings-regular.svg | 480 +- .../fonts/glyphicons-halflings-regular.ttf | Bin 29512 -> 45404 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 16448 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../hbase-webapps/static/js/bootstrap.js | 1999 ----- .../hbase-webapps/static/js/bootstrap.min.js | 13 +- pom.xml | 2 + 23 files changed, 576 insertions(+), 18862 deletions(-) delete mode 100755 hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css delete mode 100755 hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.svg mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff create mode 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff2 delete mode 100755 hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.min.js delete mode 100755 hbase-thrift/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css delete mode 100755 hbase-thrift/src/main/resources/hbase-webapps/static/css/bootstrap.css mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/css/bootstrap.min.css mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.svg mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff create mode 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff2 delete mode 100755 hbase-thrift/src/main/resources/hbase-webapps/static/js/bootstrap.js mode change 100755 => 
100644 hbase-thrift/src/main/resources/hbase-webapps/static/js/bootstrap.min.js diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css deleted file mode 100755 index 10c9ff578722..000000000000 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css +++ /dev/null @@ -1,394 +0,0 @@ -/*! - * Bootstrap v3.0.0 - * - * Copyright 2013 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world by @mdo and @fat. - */ - -.btn-default, -.btn-primary, -.btn-success, -.btn-info, -.btn-warning, -.btn-danger { - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); -} - -.btn-default:active, -.btn-primary:active, -.btn-success:active, -.btn-info:active, -.btn-warning:active, -.btn-danger:active, -.btn-default.active, -.btn-primary.active, -.btn-success.active, -.btn-info.active, -.btn-warning.active, -.btn-danger.active { - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} - -.btn:active, -.btn.active { - background-image: none; -} - -.btn-default { - text-shadow: 0 1px 0 #fff; - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#e6e6e6)); - background-image: -webkit-linear-gradient(top, #ffffff, 0%, #e6e6e6, 100%); - background-image: -moz-linear-gradient(top, #ffffff 0%, #e6e6e6 100%); - background-image: linear-gradient(to bottom, #ffffff 0%, #e6e6e6 100%); - background-repeat: repeat-x; - border-color: #e0e0e0; - border-color: #ccc; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe6e6e6', GradientType=0); -} - -.btn-default:active, -.btn-default.active { - background-color: #e6e6e6; - border-color: #e0e0e0; -} - -.btn-primary { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #3071a9, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%); - background-repeat: repeat-x; - border-color: #2d6ca2; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0); -} - -.btn-primary:active, -.btn-primary.active { - background-color: #3071a9; - border-color: #2d6ca2; -} - -.btn-success { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44)); - background-image: -webkit-linear-gradient(top, #5cb85c, 0%, #449d44, 100%); - background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%); - background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); - background-repeat: repeat-x; - border-color: #419641; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); -} - -.btn-success:active, -.btn-success.active { - background-color: #449d44; - border-color: #419641; -} - -.btn-warning { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f)); - background-image: -webkit-linear-gradient(top, #f0ad4e, 0%, #ec971f, 100%); - background-image: 
-moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); - background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); - background-repeat: repeat-x; - border-color: #eb9316; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); -} - -.btn-warning:active, -.btn-warning.active { - background-color: #ec971f; - border-color: #eb9316; -} - -.btn-danger { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c)); - background-image: -webkit-linear-gradient(top, #d9534f, 0%, #c9302c, 100%); - background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%); - background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); - background-repeat: repeat-x; - border-color: #c12e2a; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); -} - -.btn-danger:active, -.btn-danger.active { - background-color: #c9302c; - border-color: #c12e2a; -} - -.btn-info { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5)); - background-image: -webkit-linear-gradient(top, #5bc0de, 0%, #31b0d5, 100%); - background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); - background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); - background-repeat: repeat-x; - border-color: #2aabd2; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); -} - -.btn-info:active, -.btn-info.active { - background-color: #31b0d5; - border-color: #2aabd2; -} - -.thumbnail, -.img-thumbnail { - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); -} - -.dropdown-menu > li > a:hover, -.dropdown-menu > li > a:focus, -.dropdown-menu > .active > a, -.dropdown-menu > .active > a:hover, -.dropdown-menu > .active > a:focus { - background-color: #357ebd; - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #357ebd, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0); -} - -.navbar { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#f8f8f8)); - background-image: -webkit-linear-gradient(top, #ffffff, 0%, #f8f8f8, 100%); - background-image: -moz-linear-gradient(top, #ffffff 0%, #f8f8f8 100%); - background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%); - background-repeat: repeat-x; - border-radius: 4px; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); -} - -.navbar .navbar-nav > .active > a { - background-color: #f8f8f8; -} - -.navbar-brand, -.navbar-nav > li > a { - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25); -} - -.navbar-inverse { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#3c3c3c), to(#222222)); - background-image: -webkit-linear-gradient(top, #3c3c3c, 0%, #222222, 100%); - background-image: -moz-linear-gradient(top, 
#3c3c3c 0%, #222222 100%);
-  background-image: linear-gradient(to bottom, #3c3c3c 0%, #222222 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
-}
-
-.navbar-inverse .navbar-nav > .active > a {
-  background-color: #222222;
-}
-
-.navbar-inverse .navbar-brand,
-.navbar-inverse .navbar-nav > li > a {
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-}
-
-.navbar-static-top,
-.navbar-fixed-top,
-.navbar-fixed-bottom {
-  border-radius: 0;
-}
-
-.alert {
-  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-
-.alert-success {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#c8e5bc));
-  background-image: -webkit-linear-gradient(top, #dff0d8, 0%, #c8e5bc, 100%);
-  background-image: -moz-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
-  background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
-  background-repeat: repeat-x;
-  border-color: #b2dba1;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
-}
-
-.alert-info {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#b9def0));
-  background-image: -webkit-linear-gradient(top, #d9edf7, 0%, #b9def0, 100%);
-  background-image: -moz-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
-  background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
-  background-repeat: repeat-x;
-  border-color: #9acfea;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
-}
-
-.alert-warning {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#f8efc0));
-  background-image: -webkit-linear-gradient(top, #fcf8e3, 0%, #f8efc0, 100%);
-  background-image: -moz-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
-  background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
-  background-repeat: repeat-x;
-  border-color: #f5e79e;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
-}
-
-.alert-danger {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#e7c3c3));
-  background-image: -webkit-linear-gradient(top, #f2dede, 0%, #e7c3c3, 100%);
-  background-image: -moz-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
-  background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
-  background-repeat: repeat-x;
-  border-color: #dca7a7;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
-}
-
-.progress {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ebebeb), to(#f5f5f5));
-  background-image: -webkit-linear-gradient(top, #ebebeb, 0%, #f5f5f5, 100%);
-  background-image: -moz-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
-  background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
-}
-
-.progress-bar {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9));
-  background-image: -webkit-linear-gradient(top, #428bca, 0%, #3071a9, 100%);
-  background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%);
-  background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
-}
-
-.progress-bar-success {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44));
-  background-image: -webkit-linear-gradient(top, #5cb85c, 0%, #449d44, 100%);
-  background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%);
-  background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
-}
-
-.progress-bar-info {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5));
-  background-image: -webkit-linear-gradient(top, #5bc0de, 0%, #31b0d5, 100%);
-  background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
-  background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
-}
-
-.progress-bar-warning {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f));
-  background-image: -webkit-linear-gradient(top, #f0ad4e, 0%, #ec971f, 100%);
-  background-image: -moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
-  background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
-}
-
-.progress-bar-danger {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c));
-  background-image: -webkit-linear-gradient(top, #d9534f, 0%, #c9302c, 100%);
-  background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%);
-  background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
-}
-
-.list-group {
-  border-radius: 4px;
-  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
-  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
-}
-
-.list-group-item.active,
-.list-group-item.active:hover,
-.list-group-item.active:focus {
-  text-shadow: 0 -1px 0 #3071a9;
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3278b3));
-  background-image: -webkit-linear-gradient(top, #428bca, 0%, #3278b3, 100%);
-  background-image: -moz-linear-gradient(top, #428bca 0%, #3278b3 100%);
-  background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%);
-  background-repeat: repeat-x;
-  border-color: #3278b3;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);
-}
-
-.panel {
-  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
-  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-
-.panel-default > .panel-heading {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f5f5f5), to(#e8e8e8));
-  background-image: -webkit-linear-gradient(top, #f5f5f5, 0%, #e8e8e8, 100%);
-  background-image: -moz-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
-  background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
-}
-
-.panel-primary > .panel-heading {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd));
-  background-image: -webkit-linear-gradient(top, #428bca, 0%, #357ebd, 100%);
-  background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%);
-  background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
-}
-
-.panel-success > .panel-heading {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#d0e9c6));
-  background-image: -webkit-linear-gradient(top, #dff0d8, 0%, #d0e9c6, 100%);
-  background-image: -moz-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
-  background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
-}
-
-.panel-info > .panel-heading {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#c4e3f3));
-  background-image: -webkit-linear-gradient(top, #d9edf7, 0%, #c4e3f3, 100%);
-  background-image: -moz-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
-  background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
-}
-
-.panel-warning > .panel-heading {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#faf2cc));
-  background-image: -webkit-linear-gradient(top, #fcf8e3, 0%, #faf2cc, 100%);
-  background-image: -moz-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
-  background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
-}
-
-.panel-danger > .panel-heading {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#ebcccc));
-  background-image: -webkit-linear-gradient(top, #f2dede, 0%, #ebcccc, 100%);
-  background-image: -moz-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
-  background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
-}
-
-.well {
-  background-image: -webkit-gradient(linear, left 0%, left 100%, from(#e8e8e8), to(#f5f5f5));
-  background-image: -webkit-linear-gradient(top, #e8e8e8, 0%, #f5f5f5, 100%);
-  background-image: -moz-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
-  background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
-  background-repeat: repeat-x;
-  border-color: #dcdcdc;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
-  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
-  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
-}
\ No newline at end of file
diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css
old mode 100755
new mode 100644
index c31428b07eac..5e39401957d8
--- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css
+++ b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css
@@ -1,10 +1,6 @@
 /*!
- * Bootstrap v3.0.0
- *
- * Copyright 2013 Twitter, Inc
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world by @mdo and @fat.
- */
-.btn-default,.btn-primary,.btn-success,.btn-info,.btn-warning,.btn-danger{text-shadow:0 -1px 0 rgba(0,0,0,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075)}.btn-default:active,.btn-primary:active,.btn-success:active,.btn-info:active,.btn-warning:active,.btn-danger:active,.btn-default.active,.btn-primary.active,.btn-success.active,.btn-info.active,.btn-warning.active,.btn-danger.active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn:active,.btn.active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,0%,#e6e6e6,100%);background-image:-moz-linear-gradient(top,#fff 0,#e6e6e6 100%);background-image:linear-gradient(to bottom,#fff 0,#e6e6e6 100%);background-repeat:repeat-x;border-color:#e0e0e0;border-color:#ccc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0)}.btn-default:active,.btn-default.active{background-color:#e6e6e6;border-color:#e0e0e0}.btn-primary{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;border-color:#2d6ca2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.btn-primary:active,.btn-primary.active{background-color:#3071a9;border-color:#2d6ca2}.btn-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;border-color:#419641;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.btn-success:active,.btn-success.active{background-color:#449d44;border-color:#419641}.btn-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;border-color:#eb9316;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.btn-warning:active,.btn-warning.active{background-color:#ec971f;border-color:#eb9316}.btn-danger{background-image:-webkit-gradient(linear,left 0,left
100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;border-color:#c12e2a;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.btn-danger:active,.btn-danger.active{background-color:#c9302c;border-color:#c12e2a}.btn-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;border-color:#2aabd2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.btn-info:active,.btn-info.active{background-color:#31b0d5;border-color:#2aabd2}.thumbnail,.img-thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus,.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{background-color:#357ebd;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.navbar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#f8f8f8));background-image:-webkit-linear-gradient(top,#fff,0%,#f8f8f8,100%);background-image:-moz-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);background-repeat:repeat-x;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff8f8f8',GradientType=0);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075)}.navbar .navbar-nav>.active>a{background-color:#f8f8f8}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,0.25)}.navbar-inverse{background-image:-webkit-gradient(linear,left 0,left 100%,from(#3c3c3c),to(#222));background-image:-webkit-linear-gradient(top,#3c3c3c,0%,#222,100%);background-image:-moz-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c',endColorstr='#ff222222',GradientType=0)}.navbar-inverse .navbar-nav>.active>a{background-color:#222}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.navbar-static-top,.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}.alert{text-shadow:0 1px 0 rgba(255,255,255,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05)}.alert-success{background-image:-webkit-gradient(linear,left 0,left 
100%,from(#dff0d8),to(#c8e5bc));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#c8e5bc,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);background-repeat:repeat-x;border-color:#b2dba1;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffc8e5bc',GradientType=0)}.alert-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#b9def0));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#b9def0,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);background-repeat:repeat-x;border-color:#9acfea;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffb9def0',GradientType=0)}.alert-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#f8efc0));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#f8efc0,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);background-repeat:repeat-x;border-color:#f5e79e;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fff8efc0',GradientType=0)}.alert-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#e7c3c3));background-image:-webkit-linear-gradient(top,#f2dede,0%,#e7c3c3,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);background-repeat:repeat-x;border-color:#dca7a7;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffe7c3c3',GradientType=0)}.progress{background-image:-webkit-gradient(linear,left 0,left 100%,from(#ebebeb),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#ebebeb,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb',endColorstr='#fff5f5f5',GradientType=0)}.progress-bar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.progress-bar-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.progress-bar-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.progress-bar-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.progress-bar-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{text-shadow:0 -1px 0 #3071a9;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3278b3));background-image:-webkit-linear-gradient(top,#428bca,0%,#3278b3,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3278b3 100%);background-image:linear-gradient(to bottom,#428bca 0,#3278b3 100%);background-repeat:repeat-x;border-color:#3278b3;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3278b3',GradientType=0)}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.panel-default>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f5f5f5),to(#e8e8e8));background-image:-webkit-linear-gradient(top,#f5f5f5,0%,#e8e8e8,100%);background-image:-moz-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#ffe8e8e8',GradientType=0)}.panel-primary>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.panel-success>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#dff0d8),to(#d0e9c6));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#d0e9c6,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffd0e9c6',GradientType=0)}.panel-info>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#c4e3f3));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#c4e3f3,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffc4e3f3',GradientType=0)}.panel-warning>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#faf2cc));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#faf2cc,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fffaf2cc',GradientType=0)}.panel-danger>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#ebcccc));background-image:-webkit-linear-gradient(top,#f2dede,0%,#ebcccc,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffebcccc',GradientType=0)}.well{background-image:-webkit-gradient(linear,left 0,left 100%,from(#e8e8e8),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#e8e8e8,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);background-repeat:repeat-x;border-color:#dcdcdc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}
\ No newline at end of file
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-color:#e8e8e8;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-color:#2e6da4;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}
+/*# sourceMappingURL=bootstrap-theme.min.css.map */
\ No newline at end of file
diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css
deleted file mode 100755
index bbda4eed4afd..000000000000
--- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css
+++ /dev/null
@@ -1,6805 +0,0 @@
-/*!
- * Bootstrap v3.0.0
- *
- * Copyright 2013 Twitter, Inc
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world by @mdo and @fat.
- */
-
-/*!
normalize.css v2.1.0 | MIT License | git.io/normalize */ - -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -main, -nav, -section, -summary { - display: block; -} - -audio, -canvas, -video { - display: inline-block; -} - -audio:not([controls]) { - display: none; - height: 0; -} - -[hidden] { - display: none; -} - -html { - font-family: sans-serif; - -webkit-text-size-adjust: 100%; - -ms-text-size-adjust: 100%; -} - -body { - margin: 0; -} - -a:focus { - outline: thin dotted; -} - -a:active, -a:hover { - outline: 0; -} - -h1 { - margin: 0.67em 0; - font-size: 2em; -} - -abbr[title] { - border-bottom: 1px dotted; -} - -b, -strong { - font-weight: bold; -} - -dfn { - font-style: italic; -} - -hr { - height: 0; - -moz-box-sizing: content-box; - box-sizing: content-box; -} - -mark { - color: #000; - background: #ff0; -} - -code, -kbd, -pre, -samp { - font-family: monospace, serif; - font-size: 1em; -} - -pre { - white-space: pre-wrap; -} - -q { - quotes: "\201C" "\201D" "\2018" "\2019"; -} - -small { - font-size: 80%; -} - -sub, -sup { - position: relative; - font-size: 75%; - line-height: 0; - vertical-align: baseline; -} - -sup { - top: -0.5em; -} - -sub { - bottom: -0.25em; -} - -img { - border: 0; -} - -svg:not(:root) { - overflow: hidden; -} - -figure { - margin: 0; -} - -fieldset { - padding: 0.35em 0.625em 0.75em; - margin: 0 2px; - border: 1px solid #c0c0c0; -} - -legend { - padding: 0; - border: 0; -} - -button, -input, -select, -textarea { - margin: 0; - font-family: inherit; - font-size: 100%; -} - -button, -input { - line-height: normal; -} - -button, -select { - text-transform: none; -} - -button, -html input[type="button"], -input[type="reset"], -input[type="submit"] { - cursor: pointer; - -webkit-appearance: button; -} - -button[disabled], -html input[disabled] { - cursor: default; -} - -input[type="checkbox"], -input[type="radio"] { - padding: 0; - box-sizing: border-box; -} - -input[type="search"] { - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; - -webkit-appearance: textfield; -} - -input[type="search"]::-webkit-search-cancel-button, -input[type="search"]::-webkit-search-decoration { - -webkit-appearance: none; -} - -button::-moz-focus-inner, -input::-moz-focus-inner { - padding: 0; - border: 0; -} - -textarea { - overflow: auto; - vertical-align: top; -} - -table { - border-collapse: collapse; - border-spacing: 0; -} - -@media print { - * { - color: #000 !important; - text-shadow: none !important; - background: transparent !important; - box-shadow: none !important; - } - a, - a:visited { - text-decoration: underline; - } - a[href]:after { - content: " (" attr(href) ")"; - } - abbr[title]:after { - content: " (" attr(title) ")"; - } - .ir a:after, - a[href^="javascript:"]:after, - a[href^="#"]:after { - content: ""; - } - pre, - blockquote { - border: 1px solid #999; - page-break-inside: avoid; - } - thead { - display: table-header-group; - } - tr, - img { - page-break-inside: avoid; - } - img { - max-width: 100% !important; - } - @page { - margin: 2cm .5cm; - } - p, - h2, - h3 { - orphans: 3; - widows: 3; - } - h2, - h3 { - page-break-after: avoid; - } - .navbar { - display: none; - } - .table td, - .table th { - background-color: #fff !important; - } - .btn > .caret, - .dropup > .btn > .caret { - border-top-color: #000 !important; - } - .label { - border: 1px solid #000; - } - .table { - border-collapse: collapse !important; - } - .table-bordered th, - .table-bordered td { - border: 1px solid #ddd 
!important; - } -} - -*, -*:before, -*:after { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -html { - font-size: 62.5%; - -webkit-tap-highlight-color: rgba(0, 0, 0, 0); -} - -body { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 14px; - line-height: 1.428571429; - color: #333333; - background-color: #ffffff; -} - -input, -button, -select, -textarea { - font-family: inherit; - font-size: inherit; - line-height: inherit; -} - -button, -input, -select[multiple], -textarea { - background-image: none; -} - -a { - color: #428bca; - text-decoration: none; -} - -a:hover, -a:focus { - color: #2a6496; - text-decoration: underline; -} - -a:focus { - outline: thin dotted #333; - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} - -img { - vertical-align: middle; -} - -.img-responsive { - display: block; - height: auto; - max-width: 100%; -} - -.img-rounded { - border-radius: 6px; -} - -.img-thumbnail { - display: inline-block; - height: auto; - max-width: 100%; - padding: 4px; - line-height: 1.428571429; - background-color: #ffffff; - border: 1px solid #dddddd; - border-radius: 4px; - -webkit-transition: all 0.2s ease-in-out; - transition: all 0.2s ease-in-out; -} - -.img-circle { - border-radius: 50%; -} - -hr { - margin-top: 20px; - margin-bottom: 20px; - border: 0; - border-top: 1px solid #eeeeee; -} - -.sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0 0 0 0); - border: 0; -} - -p { - margin: 0 0 10px; -} - -.lead { - margin-bottom: 20px; - font-size: 16.099999999999998px; - font-weight: 200; - line-height: 1.4; -} - -@media (min-width: 768px) { - .lead { - font-size: 21px; - } -} - -small { - font-size: 85%; -} - -cite { - font-style: normal; -} - -.text-muted { - color: #999999; -} - -.text-primary { - color: #428bca; -} - -.text-warning { - color: #c09853; -} - -.text-danger { - color: #b94a48; -} - -.text-success { - color: #468847; -} - -.text-info { - color: #3a87ad; -} - -.text-left { - text-align: left; -} - -.text-right { - text-align: right; -} - -.text-center { - text-align: center; -} - -h1, -h2, -h3, -h4, -h5, -h6, -.h1, -.h2, -.h3, -.h4, -.h5, -.h6 { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-weight: 500; - line-height: 1.1; -} - -h1 small, -h2 small, -h3 small, -h4 small, -h5 small, -h6 small, -.h1 small, -.h2 small, -.h3 small, -.h4 small, -.h5 small, -.h6 small { - font-weight: normal; - line-height: 1; - color: #999999; -} - -h1, -h2, -h3 { - margin-top: 20px; - margin-bottom: 10px; -} - -h4, -h5, -h6 { - margin-top: 10px; - margin-bottom: 10px; -} - -h1, -.h1 { - font-size: 36px; -} - -h2, -.h2 { - font-size: 30px; -} - -h3, -.h3 { - font-size: 24px; -} - -h4, -.h4 { - font-size: 18px; -} - -h5, -.h5 { - font-size: 14px; -} - -h6, -.h6 { - font-size: 12px; -} - -h1 small, -.h1 small { - font-size: 24px; -} - -h2 small, -.h2 small { - font-size: 18px; -} - -h3 small, -.h3 small, -h4 small, -.h4 small { - font-size: 14px; -} - -.page-header { - padding-bottom: 9px; - margin: 40px 0 20px; - border-bottom: 1px solid #eeeeee; -} - -ul, -ol { - margin-top: 0; - margin-bottom: 10px; -} - -ul ul, -ol ul, -ul ol, -ol ol { - margin-bottom: 0; -} - -.list-unstyled { - padding-left: 0; - list-style: none; -} - -.list-inline { - padding-left: 0; - list-style: none; -} - -.list-inline > li { - display: inline-block; - padding-right: 5px; - padding-left: 5px; -} - -dl { - margin-bottom: 20px; 
-} - -dt, -dd { - line-height: 1.428571429; -} - -dt { - font-weight: bold; -} - -dd { - margin-left: 0; -} - -@media (min-width: 768px) { - .dl-horizontal dt { - float: left; - width: 160px; - overflow: hidden; - clear: left; - text-align: right; - text-overflow: ellipsis; - white-space: nowrap; - } - .dl-horizontal dd { - margin-left: 180px; - } - .dl-horizontal dd:before, - .dl-horizontal dd:after { - display: table; - content: " "; - } - .dl-horizontal dd:after { - clear: both; - } - .dl-horizontal dd:before, - .dl-horizontal dd:after { - display: table; - content: " "; - } - .dl-horizontal dd:after { - clear: both; - } -} - -abbr[title], -abbr[data-original-title] { - cursor: help; - border-bottom: 1px dotted #999999; -} - -abbr.initialism { - font-size: 90%; - text-transform: uppercase; -} - -blockquote { - padding: 10px 20px; - margin: 0 0 20px; - border-left: 5px solid #eeeeee; -} - -blockquote p { - font-size: 17.5px; - font-weight: 300; - line-height: 1.25; -} - -blockquote p:last-child { - margin-bottom: 0; -} - -blockquote small { - display: block; - line-height: 1.428571429; - color: #999999; -} - -blockquote small:before { - content: '\2014 \00A0'; -} - -blockquote.pull-right { - padding-right: 15px; - padding-left: 0; - border-right: 5px solid #eeeeee; - border-left: 0; -} - -blockquote.pull-right p, -blockquote.pull-right small { - text-align: right; -} - -blockquote.pull-right small:before { - content: ''; -} - -blockquote.pull-right small:after { - content: '\00A0 \2014'; -} - -q:before, -q:after, -blockquote:before, -blockquote:after { - content: ""; -} - -address { - display: block; - margin-bottom: 20px; - font-style: normal; - line-height: 1.428571429; -} - -code, -pre { - font-family: Monaco, Menlo, Consolas, "Courier New", monospace; -} - -code { - padding: 2px 4px; - font-size: 90%; - color: #c7254e; - white-space: nowrap; - background-color: #f9f2f4; - border-radius: 4px; -} - -pre { - display: block; - padding: 9.5px; - margin: 0 0 10px; - font-size: 13px; - line-height: 1.428571429; - color: #333333; - word-break: break-all; - word-wrap: break-word; - background-color: #f5f5f5; - border: 1px solid #cccccc; - border-radius: 4px; -} - -pre.prettyprint { - margin-bottom: 20px; -} - -pre code { - padding: 0; - font-size: inherit; - color: inherit; - white-space: pre-wrap; - background-color: transparent; - border: 0; -} - -.pre-scrollable { - max-height: 340px; - overflow-y: scroll; -} - -.container { - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; -} - -.container:before, -.container:after { - display: table; - content: " "; -} - -.container:after { - clear: both; -} - -.container:before, -.container:after { - display: table; - content: " "; -} - -.container:after { - clear: both; -} - -.row { - margin-right: -15px; - margin-left: -15px; -} - -.row:before, -.row:after { - display: table; - content: " "; -} - -.row:after { - clear: both; -} - -.row:before, -.row:after { - display: table; - content: " "; -} - -.row:after { - clear: both; -} - -.col-xs-1, -.col-xs-2, -.col-xs-3, -.col-xs-4, -.col-xs-5, -.col-xs-6, -.col-xs-7, -.col-xs-8, -.col-xs-9, -.col-xs-10, -.col-xs-11, -.col-xs-12, -.col-sm-1, -.col-sm-2, -.col-sm-3, -.col-sm-4, -.col-sm-5, -.col-sm-6, -.col-sm-7, -.col-sm-8, -.col-sm-9, -.col-sm-10, -.col-sm-11, -.col-sm-12, -.col-md-1, -.col-md-2, -.col-md-3, -.col-md-4, -.col-md-5, -.col-md-6, -.col-md-7, -.col-md-8, -.col-md-9, -.col-md-10, -.col-md-11, -.col-md-12, -.col-lg-1, -.col-lg-2, -.col-lg-3, -.col-lg-4, 
[Hunk flattened in extraction; removal lines only. This part of the patch deletes a
vendored copy of the Bootstrap 3.x stylesheet shipped with the web UI. The removed
rules cover, in order: the responsive grid (.col-xs-*, .col-sm-*, .col-md-*,
.col-lg-* widths with push/pull/offset variants at the 768px, 992px, and 1200px
breakpoints), table styling (.table, .table-condensed, .table-bordered,
.table-striped, .table-hover, contextual .active/.success/.danger/.warning cells,
.table-responsive), form elements (fieldset, legend, .form-control with its
placeholder, focus, and .has-warning/.has-error/.has-success states,
.input-sm/.input-lg, .form-inline, .form-horizontal), buttons (.btn and the
.btn-default/-primary/-warning/-danger/-success/-info/-link variants with size and
disabled modifiers), .fade/.collapse transitions, the 'Glyphicons Halflings'
@font-face and .glyphicon-* icon classes, .caret and .dropdown-menu rules, button
groups (.btn-group, .btn-group-vertical, .btn-group-justified), input groups
(.input-group, .input-group-addon, .input-group-btn), navs (.nav, .nav-tabs,
.nav-pills, justified variants), navbars (.navbar with .navbar-default and
.navbar-inverse themes and responsive collapse behavior), .breadcrumb, and
.pagination. No lines are added in this hunk.]
li:first-child > a, -.pagination-sm > li:first-child > span { - border-bottom-left-radius: 3px; - border-top-left-radius: 3px; -} - -.pagination-sm > li:last-child > a, -.pagination-sm > li:last-child > span { - border-top-right-radius: 3px; - border-bottom-right-radius: 3px; -} - -.pager { - padding-left: 0; - margin: 20px 0; - text-align: center; - list-style: none; -} - -.pager:before, -.pager:after { - display: table; - content: " "; -} - -.pager:after { - clear: both; -} - -.pager:before, -.pager:after { - display: table; - content: " "; -} - -.pager:after { - clear: both; -} - -.pager li { - display: inline; -} - -.pager li > a, -.pager li > span { - display: inline-block; - padding: 5px 14px; - background-color: #ffffff; - border: 1px solid #dddddd; - border-radius: 15px; -} - -.pager li > a:hover, -.pager li > a:focus { - text-decoration: none; - background-color: #eeeeee; -} - -.pager .next > a, -.pager .next > span { - float: right; -} - -.pager .previous > a, -.pager .previous > span { - float: left; -} - -.pager .disabled > a, -.pager .disabled > a:hover, -.pager .disabled > a:focus, -.pager .disabled > span { - color: #999999; - cursor: not-allowed; - background-color: #ffffff; -} - -.label { - display: inline; - padding: .2em .6em .3em; - font-size: 75%; - font-weight: bold; - line-height: 1; - color: #ffffff; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - border-radius: .25em; -} - -.label[href]:hover, -.label[href]:focus { - color: #ffffff; - text-decoration: none; - cursor: pointer; -} - -.label:empty { - display: none; -} - -.label-default { - background-color: #999999; -} - -.label-default[href]:hover, -.label-default[href]:focus { - background-color: #808080; -} - -.label-primary { - background-color: #428bca; -} - -.label-primary[href]:hover, -.label-primary[href]:focus { - background-color: #3071a9; -} - -.label-success { - background-color: #5cb85c; -} - -.label-success[href]:hover, -.label-success[href]:focus { - background-color: #449d44; -} - -.label-info { - background-color: #5bc0de; -} - -.label-info[href]:hover, -.label-info[href]:focus { - background-color: #31b0d5; -} - -.label-warning { - background-color: #f0ad4e; -} - -.label-warning[href]:hover, -.label-warning[href]:focus { - background-color: #ec971f; -} - -.label-danger { - background-color: #d9534f; -} - -.label-danger[href]:hover, -.label-danger[href]:focus { - background-color: #c9302c; -} - -.badge { - display: inline-block; - min-width: 10px; - padding: 3px 7px; - font-size: 12px; - font-weight: bold; - line-height: 1; - color: #ffffff; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - background-color: #999999; - border-radius: 10px; -} - -.badge:empty { - display: none; -} - -a.badge:hover, -a.badge:focus { - color: #ffffff; - text-decoration: none; - cursor: pointer; -} - -.btn .badge { - position: relative; - top: -1px; -} - -a.list-group-item.active > .badge, -.nav-pills > .active > a > .badge { - color: #428bca; - background-color: #ffffff; -} - -.nav-pills > li > a > .badge { - margin-left: 3px; -} - -.jumbotron { - padding: 30px; - margin-bottom: 30px; - font-size: 21px; - font-weight: 200; - line-height: 2.1428571435; - color: inherit; - background-color: #eeeeee; -} - -.jumbotron h1 { - line-height: 1; - color: inherit; -} - -.jumbotron p { - line-height: 1.4; -} - -.container .jumbotron { - border-radius: 6px; -} - -@media screen and (min-width: 768px) { - .jumbotron { - padding-top: 48px; - padding-bottom: 48px; - } - .container 
.jumbotron { - padding-right: 60px; - padding-left: 60px; - } - .jumbotron h1 { - font-size: 63px; - } -} - -.thumbnail { - display: inline-block; - display: block; - height: auto; - max-width: 100%; - padding: 4px; - line-height: 1.428571429; - background-color: #ffffff; - border: 1px solid #dddddd; - border-radius: 4px; - -webkit-transition: all 0.2s ease-in-out; - transition: all 0.2s ease-in-out; -} - -.thumbnail > img { - display: block; - height: auto; - max-width: 100%; -} - -a.thumbnail:hover, -a.thumbnail:focus { - border-color: #428bca; -} - -.thumbnail > img { - margin-right: auto; - margin-left: auto; -} - -.thumbnail .caption { - padding: 9px; - color: #333333; -} - -.alert { - padding: 15px; - margin-bottom: 20px; - border: 1px solid transparent; - border-radius: 4px; -} - -.alert h4 { - margin-top: 0; - color: inherit; -} - -.alert .alert-link { - font-weight: bold; -} - -.alert > p, -.alert > ul { - margin-bottom: 0; -} - -.alert > p + p { - margin-top: 5px; -} - -.alert-dismissable { - padding-right: 35px; -} - -.alert-dismissable .close { - position: relative; - top: -2px; - right: -21px; - color: inherit; -} - -.alert-success { - color: #468847; - background-color: #dff0d8; - border-color: #d6e9c6; -} - -.alert-success hr { - border-top-color: #c9e2b3; -} - -.alert-success .alert-link { - color: #356635; -} - -.alert-info { - color: #3a87ad; - background-color: #d9edf7; - border-color: #bce8f1; -} - -.alert-info hr { - border-top-color: #a6e1ec; -} - -.alert-info .alert-link { - color: #2d6987; -} - -.alert-warning { - color: #c09853; - background-color: #fcf8e3; - border-color: #fbeed5; -} - -.alert-warning hr { - border-top-color: #f8e5be; -} - -.alert-warning .alert-link { - color: #a47e3c; -} - -.alert-danger { - color: #b94a48; - background-color: #f2dede; - border-color: #eed3d7; -} - -.alert-danger hr { - border-top-color: #e6c1c7; -} - -.alert-danger .alert-link { - color: #953b39; -} - -@-webkit-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-moz-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-o-keyframes progress-bar-stripes { - from { - background-position: 0 0; - } - to { - background-position: 40px 0; - } -} - -@keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -.progress { - height: 20px; - margin-bottom: 20px; - overflow: hidden; - background-color: #f5f5f5; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); - box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); -} - -.progress-bar { - float: left; - width: 0; - height: 100%; - font-size: 12px; - color: #ffffff; - text-align: center; - background-color: #428bca; - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -webkit-transition: width 0.6s ease; - transition: width 0.6s ease; -} - -.progress-striped .progress-bar { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 
75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-size: 40px 40px; -} - -.progress.active .progress-bar { - -webkit-animation: progress-bar-stripes 2s linear infinite; - -moz-animation: progress-bar-stripes 2s linear infinite; - -ms-animation: progress-bar-stripes 2s linear infinite; - -o-animation: progress-bar-stripes 2s linear infinite; - animation: progress-bar-stripes 2s linear infinite; -} - -.progress-bar-success { - background-color: #5cb85c; -} - -.progress-striped .progress-bar-success { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-bar-info { - background-color: #5bc0de; -} - -.progress-striped .progress-bar-info { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-bar-warning { - background-color: #f0ad4e; -} - -.progress-striped .progress-bar-warning { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, 
transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-bar-danger { - background-color: #d9534f; -} - -.progress-striped .progress-bar-danger { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.media, -.media-body { - overflow: hidden; - zoom: 1; -} - -.media, -.media .media { - margin-top: 15px; -} - -.media:first-child { - margin-top: 0; -} - -.media-object { - display: block; -} - -.media-heading { - margin: 0 0 5px; -} - -.media > .pull-left { - margin-right: 10px; -} - -.media > .pull-right { - margin-left: 10px; -} - -.media-list { - padding-left: 0; - list-style: none; -} - -.list-group { - padding-left: 0; - margin-bottom: 20px; -} - -.list-group-item { - position: relative; - display: block; - padding: 10px 15px; - margin-bottom: -1px; - background-color: #ffffff; - border: 1px solid #dddddd; -} - -.list-group-item:first-child { - border-top-right-radius: 4px; - border-top-left-radius: 4px; -} - -.list-group-item:last-child { - margin-bottom: 0; - border-bottom-right-radius: 4px; - border-bottom-left-radius: 4px; -} - -.list-group-item > .badge { - float: right; -} - -.list-group-item > .badge + .badge { - margin-right: 5px; -} - -a.list-group-item { - color: #555555; -} - -a.list-group-item .list-group-item-heading { - color: #333333; -} - -a.list-group-item:hover, -a.list-group-item:focus { - text-decoration: none; - background-color: #f5f5f5; -} - -.list-group-item.active, -.list-group-item.active:hover, -.list-group-item.active:focus { - z-index: 2; - color: #ffffff; - background-color: #428bca; - border-color: #428bca; -} - -.list-group-item.active .list-group-item-heading, -.list-group-item.active:hover .list-group-item-heading, -.list-group-item.active:focus .list-group-item-heading { - color: inherit; -} - -.list-group-item.active .list-group-item-text, -.list-group-item.active:hover .list-group-item-text, -.list-group-item.active:focus .list-group-item-text { - color: #e1edf7; -} - -.list-group-item-heading { - margin-top: 0; - margin-bottom: 5px; -} - -.list-group-item-text { - margin-bottom: 0; - line-height: 1.3; -} - -.panel { - margin-bottom: 20px; - background-color: #ffffff; - border: 1px solid transparent; - border-radius: 4px; - -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05); - box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05); -} - -.panel-body { - padding: 15px; -} - -.panel-body:before, -.panel-body:after { - display: table; - content: " 
"; -} - -.panel-body:after { - clear: both; -} - -.panel-body:before, -.panel-body:after { - display: table; - content: " "; -} - -.panel-body:after { - clear: both; -} - -.panel > .list-group { - margin-bottom: 0; -} - -.panel > .list-group .list-group-item { - border-width: 1px 0; -} - -.panel > .list-group .list-group-item:first-child { - border-top-right-radius: 0; - border-top-left-radius: 0; -} - -.panel > .list-group .list-group-item:last-child { - border-bottom: 0; -} - -.panel-heading + .list-group .list-group-item:first-child { - border-top-width: 0; -} - -.panel > .table { - margin-bottom: 0; -} - -.panel > .panel-body + .table { - border-top: 1px solid #dddddd; -} - -.panel-heading { - padding: 10px 15px; - border-bottom: 1px solid transparent; - border-top-right-radius: 3px; - border-top-left-radius: 3px; -} - -.panel-title { - margin-top: 0; - margin-bottom: 0; - font-size: 16px; -} - -.panel-title > a { - color: inherit; -} - -.panel-footer { - padding: 10px 15px; - background-color: #f5f5f5; - border-top: 1px solid #dddddd; - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} - -.panel-group .panel { - margin-bottom: 0; - overflow: hidden; - border-radius: 4px; -} - -.panel-group .panel + .panel { - margin-top: 5px; -} - -.panel-group .panel-heading { - border-bottom: 0; -} - -.panel-group .panel-heading + .panel-collapse .panel-body { - border-top: 1px solid #dddddd; -} - -.panel-group .panel-footer { - border-top: 0; -} - -.panel-group .panel-footer + .panel-collapse .panel-body { - border-bottom: 1px solid #dddddd; -} - -.panel-default { - border-color: #dddddd; -} - -.panel-default > .panel-heading { - color: #333333; - background-color: #f5f5f5; - border-color: #dddddd; -} - -.panel-default > .panel-heading + .panel-collapse .panel-body { - border-top-color: #dddddd; -} - -.panel-default > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #dddddd; -} - -.panel-primary { - border-color: #428bca; -} - -.panel-primary > .panel-heading { - color: #ffffff; - background-color: #428bca; - border-color: #428bca; -} - -.panel-primary > .panel-heading + .panel-collapse .panel-body { - border-top-color: #428bca; -} - -.panel-primary > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #428bca; -} - -.panel-success { - border-color: #d6e9c6; -} - -.panel-success > .panel-heading { - color: #468847; - background-color: #dff0d8; - border-color: #d6e9c6; -} - -.panel-success > .panel-heading + .panel-collapse .panel-body { - border-top-color: #d6e9c6; -} - -.panel-success > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #d6e9c6; -} - -.panel-warning { - border-color: #fbeed5; -} - -.panel-warning > .panel-heading { - color: #c09853; - background-color: #fcf8e3; - border-color: #fbeed5; -} - -.panel-warning > .panel-heading + .panel-collapse .panel-body { - border-top-color: #fbeed5; -} - -.panel-warning > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #fbeed5; -} - -.panel-danger { - border-color: #eed3d7; -} - -.panel-danger > .panel-heading { - color: #b94a48; - background-color: #f2dede; - border-color: #eed3d7; -} - -.panel-danger > .panel-heading + .panel-collapse .panel-body { - border-top-color: #eed3d7; -} - -.panel-danger > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #eed3d7; -} - -.panel-info { - border-color: #bce8f1; -} - -.panel-info > .panel-heading { - color: #3a87ad; - background-color: #d9edf7; - border-color: #bce8f1; -} - -.panel-info > 
.panel-heading + .panel-collapse .panel-body { - border-top-color: #bce8f1; -} - -.panel-info > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #bce8f1; -} - -.well { - min-height: 20px; - padding: 19px; - margin-bottom: 20px; - background-color: #f5f5f5; - border: 1px solid #e3e3e3; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05); -} - -.well blockquote { - border-color: #ddd; - border-color: rgba(0, 0, 0, 0.15); -} - -.well-lg { - padding: 24px; - border-radius: 6px; -} - -.well-sm { - padding: 9px; - border-radius: 3px; -} - -.close { - float: right; - font-size: 21px; - font-weight: bold; - line-height: 1; - color: #000000; - text-shadow: 0 1px 0 #ffffff; - opacity: 0.2; - filter: alpha(opacity=20); -} - -.close:hover, -.close:focus { - color: #000000; - text-decoration: none; - cursor: pointer; - opacity: 0.5; - filter: alpha(opacity=50); -} - -button.close { - padding: 0; - cursor: pointer; - background: transparent; - border: 0; - -webkit-appearance: none; -} - -.modal-open { - overflow: hidden; -} - -body.modal-open, -.modal-open .navbar-fixed-top, -.modal-open .navbar-fixed-bottom { - margin-right: 15px; -} - -.modal { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1040; - display: none; - overflow: auto; - overflow-y: scroll; -} - -.modal.fade .modal-dialog { - -webkit-transform: translate(0, -25%); - -ms-transform: translate(0, -25%); - transform: translate(0, -25%); - -webkit-transition: -webkit-transform 0.3s ease-out; - -moz-transition: -moz-transform 0.3s ease-out; - -o-transition: -o-transform 0.3s ease-out; - transition: transform 0.3s ease-out; -} - -.modal.in .modal-dialog { - -webkit-transform: translate(0, 0); - -ms-transform: translate(0, 0); - transform: translate(0, 0); -} - -.modal-dialog { - z-index: 1050; - width: auto; - padding: 10px; - margin-right: auto; - margin-left: auto; -} - -.modal-content { - position: relative; - background-color: #ffffff; - border: 1px solid #999999; - border: 1px solid rgba(0, 0, 0, 0.2); - border-radius: 6px; - outline: none; - -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5); - box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5); - background-clip: padding-box; -} - -.modal-backdrop { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1030; - background-color: #000000; -} - -.modal-backdrop.fade { - opacity: 0; - filter: alpha(opacity=0); -} - -.modal-backdrop.in { - opacity: 0.5; - filter: alpha(opacity=50); -} - -.modal-header { - min-height: 16.428571429px; - padding: 15px; - border-bottom: 1px solid #e5e5e5; -} - -.modal-header .close { - margin-top: -2px; -} - -.modal-title { - margin: 0; - line-height: 1.428571429; -} - -.modal-body { - position: relative; - padding: 20px; -} - -.modal-footer { - padding: 19px 20px 20px; - margin-top: 15px; - text-align: right; - border-top: 1px solid #e5e5e5; -} - -.modal-footer:before, -.modal-footer:after { - display: table; - content: " "; -} - -.modal-footer:after { - clear: both; -} - -.modal-footer:before, -.modal-footer:after { - display: table; - content: " "; -} - -.modal-footer:after { - clear: both; -} - -.modal-footer .btn + .btn { - margin-bottom: 0; - margin-left: 5px; -} - -.modal-footer .btn-group .btn + .btn { - margin-left: -1px; -} - -.modal-footer .btn-block + .btn-block { - margin-left: 0; -} - -@media screen and (min-width: 768px) { - .modal-dialog { - right: auto; - left: 50%; - width: 600px; - padding-top: 30px; - 
padding-bottom: 30px; - } - .modal-content { - -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); - box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); - } -} - -.tooltip { - position: absolute; - z-index: 1030; - display: block; - font-size: 12px; - line-height: 1.4; - opacity: 0; - filter: alpha(opacity=0); - visibility: visible; -} - -.tooltip.in { - opacity: 0.9; - filter: alpha(opacity=90); -} - -.tooltip.top { - padding: 5px 0; - margin-top: -3px; -} - -.tooltip.right { - padding: 0 5px; - margin-left: 3px; -} - -.tooltip.bottom { - padding: 5px 0; - margin-top: 3px; -} - -.tooltip.left { - padding: 0 5px; - margin-left: -3px; -} - -.tooltip-inner { - max-width: 200px; - padding: 3px 8px; - color: #ffffff; - text-align: center; - text-decoration: none; - background-color: #000000; - border-radius: 4px; -} - -.tooltip-arrow { - position: absolute; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} - -.tooltip.top .tooltip-arrow { - bottom: 0; - left: 50%; - margin-left: -5px; - border-top-color: #000000; - border-width: 5px 5px 0; -} - -.tooltip.top-left .tooltip-arrow { - bottom: 0; - left: 5px; - border-top-color: #000000; - border-width: 5px 5px 0; -} - -.tooltip.top-right .tooltip-arrow { - right: 5px; - bottom: 0; - border-top-color: #000000; - border-width: 5px 5px 0; -} - -.tooltip.right .tooltip-arrow { - top: 50%; - left: 0; - margin-top: -5px; - border-right-color: #000000; - border-width: 5px 5px 5px 0; -} - -.tooltip.left .tooltip-arrow { - top: 50%; - right: 0; - margin-top: -5px; - border-left-color: #000000; - border-width: 5px 0 5px 5px; -} - -.tooltip.bottom .tooltip-arrow { - top: 0; - left: 50%; - margin-left: -5px; - border-bottom-color: #000000; - border-width: 0 5px 5px; -} - -.tooltip.bottom-left .tooltip-arrow { - top: 0; - left: 5px; - border-bottom-color: #000000; - border-width: 0 5px 5px; -} - -.tooltip.bottom-right .tooltip-arrow { - top: 0; - right: 5px; - border-bottom-color: #000000; - border-width: 0 5px 5px; -} - -.popover { - position: absolute; - top: 0; - left: 0; - z-index: 1010; - display: none; - max-width: 276px; - padding: 1px; - text-align: left; - white-space: normal; - background-color: #ffffff; - border: 1px solid #cccccc; - border: 1px solid rgba(0, 0, 0, 0.2); - border-radius: 6px; - -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - background-clip: padding-box; -} - -.popover.top { - margin-top: -10px; -} - -.popover.right { - margin-left: 10px; -} - -.popover.bottom { - margin-top: 10px; -} - -.popover.left { - margin-left: -10px; -} - -.popover-title { - padding: 8px 14px; - margin: 0; - font-size: 14px; - font-weight: normal; - line-height: 18px; - background-color: #f7f7f7; - border-bottom: 1px solid #ebebeb; - border-radius: 5px 5px 0 0; -} - -.popover-content { - padding: 9px 14px; -} - -.popover .arrow, -.popover .arrow:after { - position: absolute; - display: block; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} - -.popover .arrow { - border-width: 11px; -} - -.popover .arrow:after { - border-width: 10px; - content: ""; -} - -.popover.top .arrow { - bottom: -11px; - left: 50%; - margin-left: -11px; - border-top-color: #999999; - border-top-color: rgba(0, 0, 0, 0.25); - border-bottom-width: 0; -} - -.popover.top .arrow:after { - bottom: 1px; - margin-left: -10px; - border-top-color: #ffffff; - border-bottom-width: 0; - content: " "; -} - -.popover.right .arrow { - top: 50%; - left: -11px; - margin-top: -11px; - border-right-color: 
#999999; - border-right-color: rgba(0, 0, 0, 0.25); - border-left-width: 0; -} - -.popover.right .arrow:after { - bottom: -10px; - left: 1px; - border-right-color: #ffffff; - border-left-width: 0; - content: " "; -} - -.popover.bottom .arrow { - top: -11px; - left: 50%; - margin-left: -11px; - border-bottom-color: #999999; - border-bottom-color: rgba(0, 0, 0, 0.25); - border-top-width: 0; -} - -.popover.bottom .arrow:after { - top: 1px; - margin-left: -10px; - border-bottom-color: #ffffff; - border-top-width: 0; - content: " "; -} - -.popover.left .arrow { - top: 50%; - right: -11px; - margin-top: -11px; - border-left-color: #999999; - border-left-color: rgba(0, 0, 0, 0.25); - border-right-width: 0; -} - -.popover.left .arrow:after { - right: 1px; - bottom: -10px; - border-left-color: #ffffff; - border-right-width: 0; - content: " "; -} - -.carousel { - position: relative; -} - -.carousel-inner { - position: relative; - width: 100%; - overflow: hidden; -} - -.carousel-inner > .item { - position: relative; - display: none; - -webkit-transition: 0.6s ease-in-out left; - transition: 0.6s ease-in-out left; -} - -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - display: block; - height: auto; - max-width: 100%; - line-height: 1; -} - -.carousel-inner > .active, -.carousel-inner > .next, -.carousel-inner > .prev { - display: block; -} - -.carousel-inner > .active { - left: 0; -} - -.carousel-inner > .next, -.carousel-inner > .prev { - position: absolute; - top: 0; - width: 100%; -} - -.carousel-inner > .next { - left: 100%; -} - -.carousel-inner > .prev { - left: -100%; -} - -.carousel-inner > .next.left, -.carousel-inner > .prev.right { - left: 0; -} - -.carousel-inner > .active.left { - left: -100%; -} - -.carousel-inner > .active.right { - left: 100%; -} - -.carousel-control { - position: absolute; - top: 0; - bottom: 0; - left: 0; - width: 15%; - font-size: 20px; - color: #ffffff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); - opacity: 0.5; - filter: alpha(opacity=50); -} - -.carousel-control.left { - background-image: -webkit-gradient(linear, 0 top, 100% top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001))); - background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, 0.5) 0), color-stop(rgba(0, 0, 0, 0.0001) 100%)); - background-image: -moz-linear-gradient(left, rgba(0, 0, 0, 0.5) 0, rgba(0, 0, 0, 0.0001) 100%); - background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0, rgba(0, 0, 0, 0.0001) 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); -} - -.carousel-control.right { - right: 0; - left: auto; - background-image: -webkit-gradient(linear, 0 top, 100% top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5))); - background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, 0.0001) 0), color-stop(rgba(0, 0, 0, 0.5) 100%)); - background-image: -moz-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0, rgba(0, 0, 0, 0.5) 100%); - background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0, rgba(0, 0, 0, 0.5) 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); -} - -.carousel-control:hover, -.carousel-control:focus { - color: #ffffff; - text-decoration: none; - opacity: 0.9; - filter: alpha(opacity=90); -} - -.carousel-control .icon-prev, -.carousel-control .icon-next, -.carousel-control 
.glyphicon-chevron-left, -.carousel-control .glyphicon-chevron-right { - position: absolute; - top: 50%; - left: 50%; - z-index: 5; - display: inline-block; -} - -.carousel-control .icon-prev, -.carousel-control .icon-next { - width: 20px; - height: 20px; - margin-top: -10px; - margin-left: -10px; - font-family: serif; -} - -.carousel-control .icon-prev:before { - content: '\2039'; -} - -.carousel-control .icon-next:before { - content: '\203a'; -} - -.carousel-indicators { - position: absolute; - bottom: 10px; - left: 50%; - z-index: 15; - width: 60%; - padding-left: 0; - margin-left: -30%; - text-align: center; - list-style: none; -} - -.carousel-indicators li { - display: inline-block; - width: 10px; - height: 10px; - margin: 1px; - text-indent: -999px; - cursor: pointer; - border: 1px solid #ffffff; - border-radius: 10px; -} - -.carousel-indicators .active { - width: 12px; - height: 12px; - margin: 0; - background-color: #ffffff; -} - -.carousel-caption { - position: absolute; - right: 15%; - bottom: 20px; - left: 15%; - z-index: 10; - padding-top: 20px; - padding-bottom: 20px; - color: #ffffff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); -} - -.carousel-caption .btn { - text-shadow: none; -} - -@media screen and (min-width: 768px) { - .carousel-control .icon-prev, - .carousel-control .icon-next { - width: 30px; - height: 30px; - margin-top: -15px; - margin-left: -15px; - font-size: 30px; - } - .carousel-caption { - right: 20%; - left: 20%; - padding-bottom: 30px; - } - .carousel-indicators { - bottom: 20px; - } -} - -.clearfix:before, -.clearfix:after { - display: table; - content: " "; -} - -.clearfix:after { - clear: both; -} - -.pull-right { - float: right !important; -} - -.pull-left { - float: left !important; -} - -.hide { - display: none !important; -} - -.show { - display: block !important; -} - -.invisible { - visibility: hidden; -} - -.text-hide { - font: 0/0 a; - color: transparent; - text-shadow: none; - background-color: transparent; - border: 0; -} - -.affix { - position: fixed; -} - -@-ms-viewport { - width: device-width; -} - -@media screen and (max-width: 400px) { - @-ms-viewport { - width: 320px; - } -} - -.hidden { - display: none !important; - visibility: hidden !important; -} - -.visible-xs { - display: none !important; -} - -tr.visible-xs { - display: none !important; -} - -th.visible-xs, -td.visible-xs { - display: none !important; -} - -@media (max-width: 767px) { - .visible-xs { - display: block !important; - } - tr.visible-xs { - display: table-row !important; - } - th.visible-xs, - td.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-xs.visible-sm { - display: block !important; - } - tr.visible-xs.visible-sm { - display: table-row !important; - } - th.visible-xs.visible-sm, - td.visible-xs.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-xs.visible-md { - display: block !important; - } - tr.visible-xs.visible-md { - display: table-row !important; - } - th.visible-xs.visible-md, - td.visible-xs.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-xs.visible-lg { - display: block !important; - } - tr.visible-xs.visible-lg { - display: table-row !important; - } - th.visible-xs.visible-lg, - td.visible-xs.visible-lg { - display: table-cell !important; - } -} - -.visible-sm { - display: none !important; -} - -tr.visible-sm { - display: none !important; -} - 
-th.visible-sm, -td.visible-sm { - display: none !important; -} - -@media (max-width: 767px) { - .visible-sm.visible-xs { - display: block !important; - } - tr.visible-sm.visible-xs { - display: table-row !important; - } - th.visible-sm.visible-xs, - td.visible-sm.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm { - display: block !important; - } - tr.visible-sm { - display: table-row !important; - } - th.visible-sm, - td.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-sm.visible-md { - display: block !important; - } - tr.visible-sm.visible-md { - display: table-row !important; - } - th.visible-sm.visible-md, - td.visible-sm.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-sm.visible-lg { - display: block !important; - } - tr.visible-sm.visible-lg { - display: table-row !important; - } - th.visible-sm.visible-lg, - td.visible-sm.visible-lg { - display: table-cell !important; - } -} - -.visible-md { - display: none !important; -} - -tr.visible-md { - display: none !important; -} - -th.visible-md, -td.visible-md { - display: none !important; -} - -@media (max-width: 767px) { - .visible-md.visible-xs { - display: block !important; - } - tr.visible-md.visible-xs { - display: table-row !important; - } - th.visible-md.visible-xs, - td.visible-md.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-md.visible-sm { - display: block !important; - } - tr.visible-md.visible-sm { - display: table-row !important; - } - th.visible-md.visible-sm, - td.visible-md.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md { - display: block !important; - } - tr.visible-md { - display: table-row !important; - } - th.visible-md, - td.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-md.visible-lg { - display: block !important; - } - tr.visible-md.visible-lg { - display: table-row !important; - } - th.visible-md.visible-lg, - td.visible-md.visible-lg { - display: table-cell !important; - } -} - -.visible-lg { - display: none !important; -} - -tr.visible-lg { - display: none !important; -} - -th.visible-lg, -td.visible-lg { - display: none !important; -} - -@media (max-width: 767px) { - .visible-lg.visible-xs { - display: block !important; - } - tr.visible-lg.visible-xs { - display: table-row !important; - } - th.visible-lg.visible-xs, - td.visible-lg.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-lg.visible-sm { - display: block !important; - } - tr.visible-lg.visible-sm { - display: table-row !important; - } - th.visible-lg.visible-sm, - td.visible-lg.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-lg.visible-md { - display: block !important; - } - tr.visible-lg.visible-md { - display: table-row !important; - } - th.visible-lg.visible-md, - td.visible-lg.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-lg { - display: block !important; - } - tr.visible-lg { - display: table-row !important; - } - th.visible-lg, - td.visible-lg { - display: table-cell !important; - } -} - -.hidden-xs { - display: block !important; -} - -tr.hidden-xs { - display: table-row 
!important; -} - -th.hidden-xs, -td.hidden-xs { - display: table-cell !important; -} - -@media (max-width: 767px) { - .hidden-xs { - display: none !important; - } - tr.hidden-xs { - display: none !important; - } - th.hidden-xs, - td.hidden-xs { - display: none !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .hidden-xs.hidden-sm { - display: none !important; - } - tr.hidden-xs.hidden-sm { - display: none !important; - } - th.hidden-xs.hidden-sm, - td.hidden-xs.hidden-sm { - display: none !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-xs.hidden-md { - display: none !important; - } - tr.hidden-xs.hidden-md { - display: none !important; - } - th.hidden-xs.hidden-md, - td.hidden-xs.hidden-md { - display: none !important; - } -} - -@media (min-width: 1200px) { - .hidden-xs.hidden-lg { - display: none !important; - } - tr.hidden-xs.hidden-lg { - display: none !important; - } - th.hidden-xs.hidden-lg, - td.hidden-xs.hidden-lg { - display: none !important; - } -} - -.hidden-sm { - display: block !important; -} - -tr.hidden-sm { - display: table-row !important; -} - -th.hidden-sm, -td.hidden-sm { - display: table-cell !important; -} - -@media (max-width: 767px) { - .hidden-sm.hidden-xs { - display: none !important; - } - tr.hidden-sm.hidden-xs { - display: none !important; - } - th.hidden-sm.hidden-xs, - td.hidden-sm.hidden-xs { - display: none !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .hidden-sm { - display: none !important; - } - tr.hidden-sm { - display: none !important; - } - th.hidden-sm, - td.hidden-sm { - display: none !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-sm.hidden-md { - display: none !important; - } - tr.hidden-sm.hidden-md { - display: none !important; - } - th.hidden-sm.hidden-md, - td.hidden-sm.hidden-md { - display: none !important; - } -} - -@media (min-width: 1200px) { - .hidden-sm.hidden-lg { - display: none !important; - } - tr.hidden-sm.hidden-lg { - display: none !important; - } - th.hidden-sm.hidden-lg, - td.hidden-sm.hidden-lg { - display: none !important; - } -} - -.hidden-md { - display: block !important; -} - -tr.hidden-md { - display: table-row !important; -} - -th.hidden-md, -td.hidden-md { - display: table-cell !important; -} - -@media (max-width: 767px) { - .hidden-md.hidden-xs { - display: none !important; - } - tr.hidden-md.hidden-xs { - display: none !important; - } - th.hidden-md.hidden-xs, - td.hidden-md.hidden-xs { - display: none !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .hidden-md.hidden-sm { - display: none !important; - } - tr.hidden-md.hidden-sm { - display: none !important; - } - th.hidden-md.hidden-sm, - td.hidden-md.hidden-sm { - display: none !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-md { - display: none !important; - } - tr.hidden-md { - display: none !important; - } - th.hidden-md, - td.hidden-md { - display: none !important; - } -} - -@media (min-width: 1200px) { - .hidden-md.hidden-lg { - display: none !important; - } - tr.hidden-md.hidden-lg { - display: none !important; - } - th.hidden-md.hidden-lg, - td.hidden-md.hidden-lg { - display: none !important; - } -} - -.hidden-lg { - display: block !important; -} - -tr.hidden-lg { - display: table-row !important; -} - -th.hidden-lg, -td.hidden-lg { - display: table-cell !important; -} - -@media (max-width: 767px) { - .hidden-lg.hidden-xs { - display: none !important; - } - 
tr.hidden-lg.hidden-xs { - display: none !important; - } - th.hidden-lg.hidden-xs, - td.hidden-lg.hidden-xs { - display: none !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .hidden-lg.hidden-sm { - display: none !important; - } - tr.hidden-lg.hidden-sm { - display: none !important; - } - th.hidden-lg.hidden-sm, - td.hidden-lg.hidden-sm { - display: none !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-lg.hidden-md { - display: none !important; - } - tr.hidden-lg.hidden-md { - display: none !important; - } - th.hidden-lg.hidden-md, - td.hidden-lg.hidden-md { - display: none !important; - } -} - -@media (min-width: 1200px) { - .hidden-lg { - display: none !important; - } - tr.hidden-lg { - display: none !important; - } - th.hidden-lg, - td.hidden-lg { - display: none !important; - } -} - -.visible-print { - display: none !important; -} - -tr.visible-print { - display: none !important; -} - -th.visible-print, -td.visible-print { - display: none !important; -} - -@media print { - .visible-print { - display: block !important; - } - tr.visible-print { - display: table-row !important; - } - th.visible-print, - td.visible-print { - display: table-cell !important; - } - .hidden-print { - display: none !important; - } - tr.hidden-print { - display: none !important; - } - th.hidden-print, - td.hidden-print { - display: none !important; - } -}
\ No newline at end of file
diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css
old mode 100755
new mode 100644
index a553c4f5e08a..ed3905e0e0c9
--- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css
+++ b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css
@@ -1,9 +1,6 @@
 /*!
- * Bootstrap v3.0.0
- *
- * Copyright 2013 Twitter, Inc
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world by @mdo and @fat.
- *//*!
normalize.css v2.1.0 | MIT License | git.io/normalize */article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block}audio:not([controls]){display:none;height:0}[hidden]{display:none}html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:focus{outline:thin dotted}a:active,a:hover{outline:0}h1{margin:.67em 0;font-size:2em}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}dfn{font-style:italic}hr{height:0;-moz-box-sizing:content-box;box-sizing:content-box}mark{color:#000;background:#ff0}code,kbd,pre,samp{font-family:monospace,serif;font-size:1em}pre{white-space:pre-wrap}q{quotes:"\201C" "\201D" "\2018" "\2019"}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:0}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid #c0c0c0}legend{padding:0;border:0}button,input,select,textarea{margin:0;font-family:inherit;font-size:100%}button,input{line-height:normal}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}button[disabled],html input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{padding:0;box-sizing:border-box}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}textarea{overflow:auto;vertical-align:top}table{border-collapse:collapse;border-spacing:0}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:2cm .5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}*,*:before,*:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:62.5%;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.428571429;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}button,input,select[multiple],textarea{background-image:none}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}img{vertical-align:middle}.img-responsive{display:block;height:auto;max-width:100%}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;height:auto;max-width:100%;padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid 
#ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0 0 0 0);border:0}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16.099999999999998px;font-weight:200;line-height:1.4}@media(min-width:768px){.lead{font-size:21px}}small{font-size:85%}cite{font-style:normal}.text-muted{color:#999}.text-primary{color:#428bca}.text-warning{color:#c09853}.text-danger{color:#b94a48}.text-success{color:#468847}.text-info{color:#3a87ad}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-weight:500;line-height:1.1}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small{font-weight:normal;line-height:1;color:#999}h1,h2,h3{margin-top:20px;margin-bottom:10px}h4,h5,h6{margin-top:10px;margin-bottom:10px}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}h1 small,.h1 small{font-size:24px}h2 small,.h2 small{font-size:18px}h3 small,.h3 small,h4 small,.h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-bottom:20px}dt,dd{line-height:1.428571429}dt{font-weight:bold}dd{margin-left:0}@media(min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{font-size:17.5px;font-weight:300;line-height:1.25}blockquote p:last-child{margin-bottom:0}blockquote small{display:block;line-height:1.428571429;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:1.428571429}code,pre{font-family:Monaco,Menlo,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;white-space:nowrap;background-color:#f9f2f4;border-radius:4px}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.428571429;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre 
#ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-group .panel{margin-bottom:0;overflow:hidden;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse .panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse .panel-body{border-top-color:#ddd}.panel-default>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse .panel-body{border-top-color:#428bca}.panel-primary>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse .panel-body{border-top-color:#d6e9c6}.panel-success>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#d6e9c6}.panel-warning{border-color:#fbeed5}.panel-warning>.panel-heading{color:#c09853;background-color:#fcf8e3;border-color:#fbeed5}.panel-warning>.panel-heading+.panel-collapse .panel-body{border-top-color:#fbeed5}.panel-warning>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#fbeed5}.panel-danger{border-color:#eed3d7}.panel-danger>.panel-heading{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.panel-danger>.panel-heading+.panel-collapse .panel-body{border-top-color:#eed3d7}.panel-danger>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#eed3d7}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse .panel-body{border-top-color:#bce8f1}.panel-info>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#bce8f1}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 1px rgba(0,0,0,0.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:bold;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;opacity:.5;filter:alpha(opacity=50)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}body.modal-open,.modal-open .navbar-fixed-top,.modal-open .navbar-fixed-bottom{margin-right:15px}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;display:none;overflow:auto;overflow-y:scroll}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-moz-transition:-moz-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out}.modal.in 
.modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.modal-dialog{z-index:1050;width:auto;padding:10px;margin-right:auto;margin-left:auto}.modal-content{position:relative;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,0.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,0.5);box-shadow:0 3px 9px rgba(0,0,0,0.5);background-clip:padding-box}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1030;background-color:#000}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{min-height:16.428571429px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.428571429}.modal-body{position:relative;padding:20px}.modal-footer{padding:19px 20px 20px;margin-top:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer:before,.modal-footer:after{display:table;content:" "}.modal-footer:after{clear:both}.modal-footer:before,.modal-footer:after{display:table;content:" "}.modal-footer:after{clear:both}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}@media screen and (min-width:768px){.modal-dialog{right:auto;left:50%;width:600px;padding-top:30px;padding-bottom:30px}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,0.5);box-shadow:0 5px 15px rgba(0,0,0,0.5)}}.tooltip{position:absolute;z-index:1030;display:block;font-size:12px;line-height:1.4;opacity:0;filter:alpha(opacity=0);visibility:visible}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.top-right .tooltip-arrow{right:5px;bottom:0;border-top-color:#000;border-width:5px 5px 0}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-right-color:#000;border-width:5px 5px 5px 0}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-left-color:#000;border-width:5px 0 5px 5px}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-bottom-color:#000;border-width:0 5px 5px}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-bottom-color:#000;border-width:0 5px 5px}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-bottom-color:#000;border-width:0 5px 5px}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;max-width:276px;padding:1px;text-align:left;white-space:normal;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);background-clip:padding-box}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:normal;line-height:18px;background-color:#f7f7f7;border-bottom:1px 
solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover .arrow,.popover .arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover .arrow{border-width:11px}.popover .arrow:after{border-width:10px;content:""}.popover.top .arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,0.25);border-bottom-width:0}.popover.top .arrow:after{bottom:1px;margin-left:-10px;border-top-color:#fff;border-bottom-width:0;content:" "}.popover.right .arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,0.25);border-left-width:0}.popover.right .arrow:after{bottom:-10px;left:1px;border-right-color:#fff;border-left-width:0;content:" "}.popover.bottom .arrow{top:-11px;left:50%;margin-left:-11px;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,0.25);border-top-width:0}.popover.bottom .arrow:after{top:1px;margin-left:-10px;border-bottom-color:#fff;border-top-width:0;content:" "}.popover.left .arrow{top:50%;right:-11px;margin-top:-11px;border-left-color:#999;border-left-color:rgba(0,0,0,0.25);border-right-width:0}.popover.left .arrow:after{right:1px;bottom:-10px;border-left-color:#fff;border-right-width:0;content:" "}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;height:auto;max-width:100%;line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6);opacity:.5;filter:alpha(opacity=50)}.carousel-control.left{background-image:-webkit-gradient(linear,0 top,100% top,from(rgba(0,0,0,0.5)),to(rgba(0,0,0,0.0001)));background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,0.5) 0),color-stop(rgba(0,0,0,0.0001) 100%));background-image:-moz-linear-gradient(left,rgba(0,0,0,0.5) 0,rgba(0,0,0,0.0001) 100%);background-image:linear-gradient(to right,rgba(0,0,0,0.5) 0,rgba(0,0,0,0.0001) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000',endColorstr='#00000000',GradientType=1)}.carousel-control.right{right:0;left:auto;background-image:-webkit-gradient(linear,0 top,100% top,from(rgba(0,0,0,0.0001)),to(rgba(0,0,0,0.5)));background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,0.0001) 0),color-stop(rgba(0,0,0,0.5) 100%));background-image:-moz-linear-gradient(left,rgba(0,0,0,0.0001) 0,rgba(0,0,0,0.5) 100%);background-image:linear-gradient(to right,rgba(0,0,0,0.0001) 0,rgba(0,0,0,0.5) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000',endColorstr='#80000000',GradientType=1)}.carousel-control:hover,.carousel-control:focus{color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control 
.glyphicon-chevron-right{position:absolute;top:50%;left:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;margin-top:-10px;margin-left:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;margin-left:-15px;font-size:30px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after{display:table;content:" "}.clearfix:after{clear:both}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.affix{position:fixed}@-ms-viewport{width:device-width}@media screen and (max-width:400px){@-ms-viewport{width:320px}}.hidden{display:none!important;visibility:hidden!important}.visible-xs{display:none!important}tr.visible-xs{display:none!important}th.visible-xs,td.visible-xs{display:none!important}@media(max-width:767px){.visible-xs{display:block!important}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-xs.visible-sm{display:block!important}tr.visible-xs.visible-sm{display:table-row!important}th.visible-xs.visible-sm,td.visible-xs.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-xs.visible-md{display:block!important}tr.visible-xs.visible-md{display:table-row!important}th.visible-xs.visible-md,td.visible-xs.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-xs.visible-lg{display:block!important}tr.visible-xs.visible-lg{display:table-row!important}th.visible-xs.visible-lg,td.visible-xs.visible-lg{display:table-cell!important}}.visible-sm{display:none!important}tr.visible-sm{display:none!important}th.visible-sm,td.visible-sm{display:none!important}@media(max-width:767px){.visible-sm.visible-xs{display:block!important}tr.visible-sm.visible-xs{display:table-row!important}th.visible-sm.visible-xs,td.visible-sm.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-sm{display:block!important}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}@media(min-width:992px) and 
(max-width:1199px){.visible-sm.visible-md{display:block!important}tr.visible-sm.visible-md{display:table-row!important}th.visible-sm.visible-md,td.visible-sm.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-sm.visible-lg{display:block!important}tr.visible-sm.visible-lg{display:table-row!important}th.visible-sm.visible-lg,td.visible-sm.visible-lg{display:table-cell!important}}.visible-md{display:none!important}tr.visible-md{display:none!important}th.visible-md,td.visible-md{display:none!important}@media(max-width:767px){.visible-md.visible-xs{display:block!important}tr.visible-md.visible-xs{display:table-row!important}th.visible-md.visible-xs,td.visible-md.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-md.visible-sm{display:block!important}tr.visible-md.visible-sm{display:table-row!important}th.visible-md.visible-sm,td.visible-md.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-md{display:block!important}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-md.visible-lg{display:block!important}tr.visible-md.visible-lg{display:table-row!important}th.visible-md.visible-lg,td.visible-md.visible-lg{display:table-cell!important}}.visible-lg{display:none!important}tr.visible-lg{display:none!important}th.visible-lg,td.visible-lg{display:none!important}@media(max-width:767px){.visible-lg.visible-xs{display:block!important}tr.visible-lg.visible-xs{display:table-row!important}th.visible-lg.visible-xs,td.visible-lg.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-lg.visible-sm{display:block!important}tr.visible-lg.visible-sm{display:table-row!important}th.visible-lg.visible-sm,td.visible-lg.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-lg.visible-md{display:block!important}tr.visible-lg.visible-md{display:table-row!important}th.visible-lg.visible-md,td.visible-lg.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-lg{display:block!important}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}.hidden-xs{display:block!important}tr.hidden-xs{display:table-row!important}th.hidden-xs,td.hidden-xs{display:table-cell!important}@media(max-width:767px){.hidden-xs{display:none!important}tr.hidden-xs{display:none!important}th.hidden-xs,td.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-xs.hidden-sm{display:none!important}tr.hidden-xs.hidden-sm{display:none!important}th.hidden-xs.hidden-sm,td.hidden-xs.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-xs.hidden-md{display:none!important}tr.hidden-xs.hidden-md{display:none!important}th.hidden-xs.hidden-md,td.hidden-xs.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-xs.hidden-lg{display:none!important}tr.hidden-xs.hidden-lg{display:none!important}th.hidden-xs.hidden-lg,td.hidden-xs.hidden-lg{display:none!important}}.hidden-sm{display:block!important}tr.hidden-sm{display:table-row!important}th.hidden-sm,td.hidden-sm{display:table-cell!important}@media(max-width:767px){.hidden-sm.hidden-xs{display:none!important}tr.hidden-sm.hidden-xs{display:none!important}th.hidden-sm.hidden-xs,td.hidden-sm.hidden-xs{display:none!important}}@media(min-width:768px) and 
(max-width:991px){.hidden-sm{display:none!important}tr.hidden-sm{display:none!important}th.hidden-sm,td.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-sm.hidden-md{display:none!important}tr.hidden-sm.hidden-md{display:none!important}th.hidden-sm.hidden-md,td.hidden-sm.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-sm.hidden-lg{display:none!important}tr.hidden-sm.hidden-lg{display:none!important}th.hidden-sm.hidden-lg,td.hidden-sm.hidden-lg{display:none!important}}.hidden-md{display:block!important}tr.hidden-md{display:table-row!important}th.hidden-md,td.hidden-md{display:table-cell!important}@media(max-width:767px){.hidden-md.hidden-xs{display:none!important}tr.hidden-md.hidden-xs{display:none!important}th.hidden-md.hidden-xs,td.hidden-md.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-md.hidden-sm{display:none!important}tr.hidden-md.hidden-sm{display:none!important}th.hidden-md.hidden-sm,td.hidden-md.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}tr.hidden-md{display:none!important}th.hidden-md,td.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-md.hidden-lg{display:none!important}tr.hidden-md.hidden-lg{display:none!important}th.hidden-md.hidden-lg,td.hidden-md.hidden-lg{display:none!important}}.hidden-lg{display:block!important}tr.hidden-lg{display:table-row!important}th.hidden-lg,td.hidden-lg{display:table-cell!important}@media(max-width:767px){.hidden-lg.hidden-xs{display:none!important}tr.hidden-lg.hidden-xs{display:none!important}th.hidden-lg.hidden-xs,td.hidden-lg.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-lg.hidden-sm{display:none!important}tr.hidden-lg.hidden-sm{display:none!important}th.hidden-lg.hidden-sm,td.hidden-lg.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-lg.hidden-md{display:none!important}tr.hidden-lg.hidden-md{display:none!important}th.hidden-lg.hidden-md,td.hidden-lg.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-lg{display:none!important}tr.hidden-lg{display:none!important}th.hidden-lg,td.hidden-lg{display:none!important}}.visible-print{display:none!important}tr.visible-print{display:none!important}th.visible-print,td.visible-print{display:none!important}@media print{.visible-print{display:block!important}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}.hidden-print{display:none!important}tr.hidden-print{display:none!important}th.hidden-print,td.hidden-print{display:none!important}}
\ No newline at end of file
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*!
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon
-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:be
fore{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211
"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px 
solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote 
.small,blockquote footer,blockquote small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:''}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:'\00A0 \2014'}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
.list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child 
th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group 
.panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 
#fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;filter:alpha(opacity=0);opacity:0;line-break:auto}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 
5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);line-break:auto}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} +/*# sourceMappingURL=bootstrap.min.css.map */ \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot old mode 100755 new mode 100644 index 87eaa434234e2a984c261e0450a2f4ad837aa7b4..b93a4953fff68df523aa7656497ee339d6026d64 GIT binary patch delta 19939 zcmV(tKR!hM z403H!GIMLm%TP)pH7k~9Z0T1jY1LPjFGcQ=oYMs~BdpY;Ri=KjW)O*y8&yQy#JTr~ ztFhopw=v-_A^!d^p1R6&ssc9|l9OwSm6QhtD1W+$p=cjY_~|%rknLn7d2x=pF|*>< zITnLJ5iN;=NJo+Z^cb~MBKSsX^aR+THbXO%q$Zw+!5;XkS2Z!g(;M8^LZ7c;%j!{K zQifG8^|pH|zkNbr!w)lh>qBE@2oPJglx5w$(y_=9rUFJI+SjGL61Ukl(R(u9q5 zddcb1nASCtGQ^?M;{{OKDW25+5uO{y!$yNQQ;Pv{ghs%7ZsBPZVSxg-xIjb^&~u18 zniwB_IUC}vr9Z{Gkp>y3!V3&id!|%t?K|GJ$DG$yTuz=^_?uBJyI$0DkbgJwW`7GX z8JNn(Q!_yf+znrDkbz)V95+yZ5^$Ki^xz0-tL?KP@MqXB;9qdN!O{d;fGs`4*QI_} zS|cyvG$2bx0AREryNm&P-vZJw2itrJM1lqs(X{~V0D>lD!=y=xw2u74kHI^)P3)9= zC$?{GhLmlh3rPzR1|fGGX*qmII)A6zr@*`C_saghdlmbB_PPCW1iHAXPx$5Z?x~;; z`kO46Ac9O?CWJ~b@s-&*F9YolIVQ|m_zdWiuzNfA`X|6MU>uTV0DnMs6i5$*c?d*W zB)R~EGl9zkh(vfYI1B^@gI9ncLwGfK7zlRm1a8 z4GTm{3M43Sod&%J?omOMLEMQ!mOv-RBn@Z-a2(1o&Q=mIAYh%kbH1u&(j>_=BqDS& zJ!Z6+G=xEfdsJ(d2QYkc7)Ye{5X8^|qwioFOeV3kbJS?2T>+G*K|G@xp^LI$QnV+d z0+k_yi-9ptrU5&n5?_jRGk@D7pdjM%>-rej>Q>_g>I*7%Wzj}pMSmbqB#wJ2*;hSq zIWlBzw7T9aS?qHb+iW_*!^^zywG7;=bv3D`-hb}Ubij3$UO2wY zHt6#o2grm}ZF%rf$t4#4b$;~nnk9ZQa?+S#{deD*UXb};sam6rTAg8{gR(Fo+2{q0 zp||=@_F$zhjtzBjz_I7*Swf{;nH#9{g@nox;i!Pr73zf(TL)$Ei|+3JIzXW6N+(4Z z+kq^vGanswVroE*aDS-5#^e|96SQWi$=?<8f9|!nWmh>U3G*Ae;W04hLbX-GTD2 zpbLXMrso0yp2tGfnJ5HSQbBN`X8B&Q`mZZzIiPXV-43~pihoVUbQ~C61RcBBs)2Cp zMU9nm2S8niKpyiCsaB1jYgVNS1$7hZ8suc@UeOU)AdMB}mIjwnKtDwt)%IC3^r)B| 
delta 13843
[base85-encoded binary delta omitted]
z9v}2o{H}q+X>OYT*v)8v(ek50P|7_cl7~d(i#giA5yaQJ`WNRsLGm;5Ka4b5uds) zrFDfTGu_6@i3>W2ZxI6`;gVWsKdL3{>1f#>jen7|&&+IMo(eq&4Y0?Zg@N!yFMaH8N+g*o(IpOb&an_UewQh))It~x`Yz*LGJ3s;rcaW zhoC~T$Xbb5f?}%~wTvg)Zc%A4lQV{4ZcHchb;V^{1_$XpNNpu9FSwmCOEBwg=($_k zsek(n7BlFUJ3U-L={k65Ab@7VO+GP|Q7?5E07DUIw9VYqV zkuyE^A=tK?H;8%2nLZ5p5#NfG$vWOwFMll;9I+KqHgQ)hS4D33306$JUrrf`d|7MR zgIdgbpo3KTZY)qdICzTa-#M+OLHkJ+NwL5li!EX(JgjN*$1X{sI{>4{4(4qDNr+yi z54F%QFdP8M@qmzoecOV2P=G}@E}zNtGkecStMFT1U_45|FH^V1#1#LWQp+K-1b;@Z zQiZTP{W#6}Fn3Q4i|U>hRW6F(0B7bSRHw{56UnW$qOs<00E@jqU%r+_;s3O!PLzcX zN4ZRwqBZ;L)#gAk3r4%x z1N2BHOp_$62`1Cv5!@o6Fwo~SQhzD?D9#NM5u&qQyl@FB2C8&A7ts$qp|l^OtudY^ zz;`DfyA6U<;Pd?t229m`FgH!6wg_(WR(uIfT$!b zZ8GCp}j)x%IQ=%nVX;DMJ-&K4K zkS1q&;Z`1`%3KR!TXl${(0{2DHU6zv2n^kp>GFd3qy&Sjr9i^KlG5)zrRcsrgLDNs zC|VWeDhoWORj8*#qMEU4DUu5vTxu7w3#~)k^+GS@baR?~RfTrsmwHu98`nibt|I5| z3wmICI4KOFjiFl4aVXG5xTpsrbw!}NWgG=XlFSy&5L>z$X21oYaDOR`fJ@U%p;D35 z!A-J3k}GUS72A{~+7u;_)vTtaHP-Abwu__-TD3qbS>Fez?B30$6#I)%n`Xq#7BL1! zsdND{5r9c;ZT1DF=$h(&qtH$G6Q*}y3wTO{4kIA6-T45FMRG_obnTx;=`Onx0E1Rg zMQnm9PX*A)syh$|%YPJMX*|HaC@PjBQbvFFC+x(2p;b>RSEgl+fpFS}HcJwGz@5;b zb~3?xv(7Fy9`R~Uaj)+Q+y**H&*RT|4rdv?_)P2>E~M&&{J-Jq5Rf{T2VD2Ope(ws z*6#pl2tka}bI z93p-b4jOdcMJ8Awbp<7ga+H*)MZyu(LFq6ubc7ZNbCUu_0x1wSML5Cnp^p&rV?7Rr z8xZNnYA_(+qkn3Q3^uUfqaFm;iW(!dSOGePNE2v8ktxLm5~d;GlH&3f09G*GLqG@^ zHz4$(@CJY$08@b+gJTYmGyuW}kRy;=K!{;u1bh?BCxEa(ZGwaWFgFE|4^S0wHh>)f z^9GO23QUOE#m*M3NUTBV<<%)<%j{bIspHHDt{TMy?5g9I9LI7fa_EO-(JX|!20z{Z~ z5($uoWvl?=rTB3ud%)7_oVjHl;4?%+`L4wBvVDEqdw!i}IKz{=|fO+Zx*_Z~-U>X^KZsY;Ahz5Qj z7+8Q{;sIlb1#SQoSO7_20S|BkX3z!QuoZSdO<4fXWCERV301%v)B*VUk~S}*G7OB6 z9~?(P3hG23FL4`UhoQC}hLCC-gl&sV#yt}l(@b4VjinPiFH%7991d|n-xS8^rZqz` zrhgfXkj!L;Vh!1chh`idm}hp_yyOL^AS^jBoZP^#>H;sQetqXpzHj^Hw~FScBRldV zuN~T)bK-+%6c_lQw$@=?sDgVi64`*_#0J(NG%o>xcneFwP+kIZ@DLY(bhHB1pku28 z9Y_}HK(kT>Vvr@&ff}3$l;Axh0oe!;NL)a9LIbJr8_$5!Yz7lxE|~#T$OFueU}5+s2qhr)hYx;X|l - - + + - - + + - - - - - - - - - + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf old mode 100755 new mode 100644 index be784dc1d5bcb92ab155f578f3723524a3dd9688..1413fc609ab6f21774de0cb7e01360095584f65b GIT binary patch literal 45404 zcmd?Sd0-pWwLh*qi$?oCk~i6sWlOeWJC3|4juU5JNSu9hSVACzERcmjLV&P^utNzg zIE4Kr1=5g!SxTX#Ern9_%4&01rlrW`Z!56xXTGQR4C z3vR~wXq>NDx$c~e?;ia3YjJ*$!C>69a?2$lLyhpI!CFfJsP=|`8@K0|bbMpWwVUEygg0=0x_)HeHpGSJagJNLA3c!$EuOV>j$wi! 
zbo{vZ(s8tl>@!?}dmNHXo)ABy7ohD7_1G-P@SdJWT8*oeyBVYVW9*vn}&VI4q++W;Z+uz=QTK}^C75!`aFYCX# zf7fC2;o`%!huaTNJAB&VWrx=szU=VLhwnbT`vc<#<`4WI6n_x@AofA~2d90o?1L3w z9!I|#P*NQ)$#9aASijuw>JRld^-t)Zhmy|i-`Iam|IWkguaMR%lhi4p~cX-9& zjfbx}yz}s`4-6>D^+6FzihR)Y!GsUy=_MWi_v7y#KmYi-{iZ+s@ekkq!@Wxz!~BQwiI&ti z>hC&iBe2m(dpNVvSbZe3DVgl(dxHt-k@{xv;&`^c8GJY%&^LpM;}7)B;5Qg5J^E${ z7z~k8eWOucjX6)7q1a%EVtmnND8cclz8R1=X4W@D8IDeUGXxEWe&p>Z*voO0u_2!! zj3dT(Ki+4E;uykKi*yr?w6!BW2FD55PD6SMj`OfBLwXL5EA-9KjpMo4*5Eqs^>4&> z8PezAcn!9jk-h-Oo!E9EjX8W6@EkTHeI<@AY{f|5fMW<-Ez-z)xCvW3()Z#x0oydB zzm4MzY^NdpIF9qMp-jU;99LjlgY@@s+=z`}_%V*xV7nRV*Kwrx-i`FzI0BZ#yOI8# z!SDeNA5b6u9!Imj89v0(g$;dT_y|Yz!3V`i{{_dez8U@##|X9A};s^7vEd!3AcdyVlhVk$v?$O442KIM1-wX^R{U7`JW&lPr3N(%kXfXT_`7w^? z=#ntx`tTF|N$UT?pELvw7T*2;=Q-x@KmDUIbLyXZ>f5=y7z1DT<7>Bp0k;eItHF?1 zErzhlD2B$Tm|^7DrxnTYm-tgg`Mt4Eivp5{r$o9e)8(fXBO4g|G^6Xy?y$SM*&V52 z6SR*%`%DZC^w(gOWQL?6DRoI*hBNT)xW9sxvmi@!vI^!mI$3kvAMmR_q#SGn3zRb_ zGe$=;Tv3dXN~9XuIHow*NEU4y&u}FcZEZoSlXb9IBOA}!@J3uovp}yerhPMaiI8|SDhvWVr z^BE&yx6e3&RYqIg;mYVZ*3#A-cDJ;#ms4txEmwm@g^s`BB}KmSr7K+ruIoKs=s|gOXP|2 zb1!)87h9?(+1^QRWb(Vo8+@G=o24gyuzF3ytfsKjTHZJ}o{YznGcTDm!s)DRnmOX} z3pPL4wExoN$kyc2>#J`k+<67sy-VsfbQ-1u+HkyFR?9G`9r6g4*8!(!c65Be-5hUg zZHY$M0k(Yd+DT1*8)G(q)1&tDl=g9H7!bZTOvEEFnBOk_K=DXF(d4JOaH zI}*A3jGmy{gR>s}EQzyJa_q_?TYPNXRU1O;fcV_&TQZhd{@*8Tgpraf~nT0BYktu*n{a~ub^UUqQPyr~yBY{k2O zgV)honv{B_CqY|*S~3up%Wn%7i*_>Lu|%5~j)}rQLT1ZN?5%QN`LTJ}vA!EE=1`So z!$$Mv?6T)xk)H8JTrZ~m)oNXxS}pwPd#);<*>zWsYoL6iK!gRSBB{JCgB28C#E{T? z5VOCMW^;h~eMke(w6vLlKvm!!TyIf;k*RtK)|Q>_@nY#J%=h%aVb)?Ni_By)XNxY)E3`|}_u}fn+Kp^3p4RbhFUBRtGsDyx9Eolg77iWN z2iH-}CiM!pfYDIn7;i#Ui1KG01{3D<{e}uWTdlX4Vr*nsb^>l0%{O?0L9tP|KGw8w z+T5F}md>3qDZQ_IVkQ|BzuN08uN?SsVt$~wcHO4pB9~ykFTJO3g<4X({-Tm1w{Ufo zI03<6KK`ZjqVyQ(>{_aMxu7Zm^ck&~)Q84MOsQ-XS~{6j>0lTl@lMtfWjj;PT{nlZ zIn0YL?kK7CYJa)(8?unZ)j8L(O}%$5S#lTcq{rr5_gqqtZ@*0Yw4}OdjL*kBv+>+@ z&*24U=y{Nl58qJyW1vTwqsvs=VRAzojm&V zEn6=WzdL1y+^}%Vg!ap>x%%nFi=V#wn# zUuheBR@*KS)5Mn0`f=3fMwR|#-rPMQJg(fW*5e`7xO&^UUH{L(U8D$JtI!ac!g(Ze89<`UiO@L+)^D zjPk2_Ie0p~4|LiI?-+pHXuRaZKG$%zVT0jn!yTvvM^jlcp`|VSHRt-G@_&~<4&qW@ z?b#zIN)G(}L|60jer*P7#KCu*Af;{mpWWvYK$@Squ|n-Vtfgr@ZOmR5Xpl;0q~VILmjk$$mgp+`<2jP z@+nW5Oap%fF4nFwnVwR7rpFaOdmnfB$-rkO6T3#w^|*rft~acgCP|ZkgA6PHD#Of| zY%E!3tXtsWS`udLsE7cSE8g@p$ceu*tI71V31uA7jwmXUCT7+Cu3uv|W>ZwD{&O4Nfjjvl43N#A$|FWxId! 
z%=X!HSiQ-#4nS&smww~iXRn<-`&zc)nR~js?|Ei-cei$^$KsqtxNDZvl1oavXK#Pz zT&%Wln^Y5M95w=vJxj0a-ko_iQt(LTX_5x#*QfQLtPil;kkR|kz}`*xHiLWr35ajx zHRL-QQv$|PK-$ges|NHw8k6v?&d;{A$*q15hz9{}-`e6ys1EQ1oNNKDFGQ0xA!x^( zkG*-ueZT(GukSnK&Bs=4+w|(kuWs5V_2#3`!;f}q?>xU5IgoMl^DNf+Xd<=sl2XvkqviJ>d?+G@Z5nxxd5Sqd$*ENUB_mb8Z+7CyyU zA6mDQ&e+S~w49csl*UePzY;^K)Fbs^%?7;+hFc(xz#mWoek4_&QvmT7Fe)*{h-9R4 zqyXuN5{)HdQ6yVi#tRUO#M%;pL>rQxN~6yoZ)*{{!?jU)RD*oOxDoTjVh6iNmhWNC zB5_{R=o{qvxEvi(khbRS`FOXmOO|&Dj$&~>*oo)bZz%lPhEA@ zQ;;w5eu5^%i;)w?T&*=UaK?*|U3~{0tC`rvfEsRPgR~16;~{_S2&=E{fE2=c>{+y} zx1*NTv-*zO^px5TA|B```#NetKg`19O!BK*-#~wDM@KEllk^nfQ2quy25G%)l72<> zzL$^{DDM#jKt?<>m;!?E2p0l12`j+QJjr{Lx*47Nq(v6i3M&*P{jkZB{xR?NOSPN% zU>I+~d_ny=pX??qjF*E78>}Mgts@_yn`)C`wN-He_!OyE+gRI?-a>Om>Vh~3OX5+& z6MX*d1`SkdXwvb7KH&=31RCC|&H!aA1g_=ZY0hP)-Wm6?A7SG0*|$mC7N^SSBh@MG z9?V0tv_sE>X==yV{)^LsygK2=$Mo_0N!JCOU?r}rmWdHD%$h~~G3;bt`lH& zAuOOZ=G1Mih**0>lB5x+r)X^8mz!0K{SScj4|a=s^VhUEp#2M=^#WRqe?T&H9GnWa zYOq{+gBn9Q0e0*Zu>C(BAX=I-Af9wIFhCW6_>TsIH$d>|{fIrs&BX?2G>GvFc=<8` zVJ`#^knMU~65dWGgXcht`Kb>{V2oo%<{NK|iH+R^|Gx%q+env#Js*(EBT3V0=w4F@W+oLFsA)l7Qy8mx_;6Vrk;F2RjKFvmeq} zro&>@b^(?f))OoQ#^#s)tRL>b0gzhRYRG}EU%wr9GjQ#~Rpo|RSkeik^p9x2+=rUr}vfnQoeFAlv=oX%YqbLpvyvcZ3l$B z5bo;hDd(fjT;9o7g9xUg3|#?wU2#BJ0G&W1#wn?mfNR{O7bq747tc~mM%m%t+7YN}^tMa24O4@w<|$lk@pGx!;%pKiq&mZB z?3h<&w>un8r?Xua6(@Txu~Za9tI@|C4#!dmHMzDF_-_~Jolztm=e)@vG11bZQAs!tFvd9{C;oxC7VfWq377Y(LR^X_TyX9bn$)I765l=rJ%9uXcjggX*r?u zk|0!db_*1$&i8>d&G3C}A`{Fun_1J;Vx0gk7P_}8KBZDowr*8$@X?W6v^LYmNWI)lN92yQ;tDpN zOUdS-W4JZUjwF-X#w0r;97;i(l}ZZT$DRd4u#?pf^e2yaFo zbm>I@5}#8FjsmigM8w_f#m4fEP~r~_?OWB%SGWcn$ThnJ@Y`ZI-O&Qs#Y14To( zWAl>9Gw7#}eT(!c%D0m>5D8**a@h;sLW=6_AsT5v1Sd_T-C4pgu_kvc?7+X&n_fct znkHy(_LExh=N%o3I-q#f$F4QJpy>jZBW zRF7?EhqTGk)w&Koi}QQY3sVh?@e-Z3C9)P!(hMhxmXLC zF_+ZSTQU`Gqx@o(~B$dbr zHlEUKoK&`2gl>zKXlEi8w6}`X3kh3as1~sX5@^`X_nYl}hlbpeeVlj#2sv)CIMe%b zBs7f|37f8qq}gA~Is9gj&=te^wN8ma?;vF)7gce;&sZ64!7LqpR!fy)?4cEZposQ8 zf;rZF7Q>YMF1~eQ|Z*!5j0DuA=`~VG$Gg6B?Om1 z6fM@`Ck-K*k(eJ)Kvysb8sccsFf@7~3vfnC=<$q+VNv)FyVh6ZsWw}*vs>%k3$)9| zR9ek-@pA23qswe1io)(Vz!vS1o*XEN*LhVYOq#T`;rDkgt86T@O`23xW~;W_#ZS|x zvwx-XMb7_!hIte-#JNpFxskMMpo2OYhHRr0Yn8d^(jh3-+!CNs0K2B!1dL$9UuAD= zQ%7Ae(Y@}%Cd~!`h|wAdm$2WoZ(iA1(a_-1?znZ%8h72o&Mm*4x8Ta<4++;Yr6|}u zW8$p&izhdqF=m8$)HyS2J6cKyo;Yvb>DTfx4`4R{ zPSODe9E|uflE<`xTO=r>u~u=NuyB&H!(2a8vwh!jP!yfE3N>IiO1jI>7e&3rR#RO3_}G23W?gwDHgSgekzQ^PU&G5z&}V5GO? 
zfg#*72*$DP1T8i`S7=P;bQ8lYF9_@8^C(|;9v8ZaK2GnWz4$Th2a0$)XTiaxNWfdq z;yNi9veH!j)ba$9pke8`y2^63BP zIyYKj^7;2don3se!P&%I2jzFf|LA&tQ=NDs{r9fIi-F{-yiG-}@2`VR^-LIFN8BC4 z&?*IvLiGHH5>NY(Z^CL_A;yISNdq58}=u~9!Ia7 zm7MkDiK~lsfLpvmPMo!0$keA$`%Tm`>Fx9JpG^EfEb(;}%5}B4Dw!O3BCkf$$W-dF z$BupUPgLpHvr<<+QcNX*w@+Rz&VQz)Uh!j4|DYeKm5IC05T$KqVV3Y|MSXom+Jn8c zgUEaFW1McGi^44xoG*b0JWE4T`vka7qTo#dcS4RauUpE{O!ZQ?r=-MlY#;VBzhHGU zS@kCaZ*H73XX6~HtHd*4qr2h}Pf0Re@!WOyvres_9l2!AhPiV$@O2sX>$21)-3i+_ z*sHO4Ika^!&2utZ@5%VbpH(m2wE3qOPn-I5Tbnt&yn9{k*eMr3^u6zG-~PSr(w$p> zw)x^a*8Ru$PE+{&)%VQUvAKKiWiwvc{`|GqK2K|ZMy^Tv3g|zENL86z7i<c zW`W>zV1u}X%P;Ajn+>A)2iXZbJ5YB_r>K-h5g^N=LkN^h0Y6dPFfSBh(L`G$D%7c` z&0RXDv$}c7#w*7!x^LUes_|V*=bd&aP+KFi((tG*gakSR+FA26%{QJdB5G1F=UuU&koU*^zQA=cEN9}Vd?OEh| zgzbFf1?@LlPkcXH$;YZe`WEJ3si6&R2MRb}LYK&zK9WRD=kY-JMPUurX-t4(Wy{%` zZ@0WM2+IqPa9D(^*+MXw2NWwSX-_WdF0nMWpEhAyotIgqu5Y$wA=zfuXJ0Y2lL3#ji26-P3Z?-&0^KBc*`T$+8+cqp`%g0WB zTH9L)FZ&t073H4?t=(U6{8B+uRW_J_n*vW|p`DugT^3xe8Tomh^d}0k^G7$3wLgP& zn)vTWiMA&=bR8lX9H=uh4G04R6>C&Zjnx_f@MMY!6HK5v$T%vaFm;E8q=`w2Y}ucJ zkz~dKGqv9$E80NTtnx|Rf_)|3wxpnY6nh3U9<)fv2-vhQ6v=WhKO@~@X57N-`7Ppc zF;I7)eL?RN23FmGh0s;Z#+p)}-TgTJE%&>{W+}C`^-sy{gTm<$>rR z-X7F%MB9Sf%6o7A%ZHReD4R;imU6<9h81{%avv}hqugeaf=~^3A=x(Om6Lku-Pn9i zC;LP%Q7Xw*0`Kg1)X~nAsUfdV%HWrpr8dZRpd-#%)c#Fu^mqo|^b{9Mam`^Zw_@j@ zR&ZdBr3?@<@%4Z-%LT&RLgDUFs4a(CTah_5x4X`xDRugi#vI-cw*^{ncwMtA4NKjByYBza)Y$hozZCpuxL{IP&=tw6ZO52WY3|iwGf&IJCn+u(>icK zZB1~bWXCmwAUz|^<&ysd#*!DSp8}DLNbl5lRFat4NkvItxy;9tpp9~|@ z;JctShv^Iq4(z+y7^j&I?GCdKMVg&jCwtCkc4*@O7HY*veGDBtAIn*JgD$QftP}8= zxFAdF=(S>Ra6(4slk#h%b?EOU-96TIX$Jbfl*_7IY-|R%H zF8u|~hYS-YwWt5+^!uGcnKL~jM;)ObZ#q68ZkA?}CzV-%6_vPIdzh_wHT_$mM%vws9lxUj;E@#1UX?WO2R^41(X!nk$+2oJGr!sgcbn1f^yl1 z#pbPB&Bf;1&2+?};Jg5qgD1{4_|%X#s48rOLE!vx3@ktstyBsDQWwDz4GYlcgu$UJ zp|z_32yN72T*oT$SF8<}>e;FN^X&vWNCz>b2W0rwK#<1#kbV)Cf`vN-F$&knLo5T& z8!sO-*^x4=kJ$L&*h%rQ@49l?7_9IG99~xJDDil00<${~D&;kiqRQqeW5*22A`8I2 z(^@`qZoF7_`CO_e;8#qF!&g>UY;wD5MxWU>azoo=E{kW(GU#pbOi%XAn%?W{b>-bTt&2?G=E&BnK9m0zs{qr$*&g8afR_x`B~o zd#dxPpaap;I=>1j8=9Oj)i}s@V}oXhP*{R|@DAQXzQJekJnmuQ;vL90_)H_nD1g6e zS1H#dzg)U&6$fz0g%|jxDdz|FQN{KJ&Yx0vfuzAFewJjv`pdMRpY-wU`-Y6WQnJ(@ zGVb!-8DRJZvHnRFiR3PG3Tu^nCn(CcZHh7hQvyd7i6Q3&ot86XI{jo%WZqCPcTR0< zMRg$ZE=PQx66ovJDvI_JChN~k@L^Pyxv#?X^<)-TS5gk`M~d<~j%!UOWG;ZMi1af< z+86U0=sm!qAVJAIqqU`Qs1uJhQJA&n@9F1PUrYuW!-~IT>l$I!#5dBaiAK}RUufjg{$#GdQBkxF1=KU2E@N=i^;xgG2Y4|{H>s` z$t`k8c-8`fS7Yfb1FM#)vPKVE4Uf(Pk&%HLe z%^4L>@Z^9Z{ZOX<^e)~adVRkKJDanJ6VBC_m@6qUq_WF@Epw>AYqf%r6qDzQ~AEJ!jtUvLp^CcqZ^G-;Kz3T;O4WG45Z zFhrluCxlY`M+OKr2SeI697btH7Kj`O>A!+2DTEQ=48cR>Gg2^5uqp(+y5Sl09MRl* zp|28!v*wvMd_~e2DdKDMMQ|({HMn3D%%ATEecGG8V9>`JeL)T0KG}=}6K8NiSN5W< z79-ZdYWRUb`T}(b{RjN8>?M~opnSRl$$^gT`B27kMym5LNHu-k;A;VF8R(HtDYJHS zU7;L{a@`>jd0svOYKbwzq+pWSC(C~SPgG~nWR3pBA8@OICK$Cy#U`kS$I;?|^-SBC zBFkoO8Z^%8Fc-@X!KebF2Ob3%`8zlVHj6H;^(m7J35(_bS;cZPd}TY~qixY{MhykQ zV&7u7s%E=?i`}Ax-7dB0ih47w*7!@GBt<*7ImM|_mYS|9_K7CH+i}?*#o~a&tF-?C zlynEu1DmiAbGurEX2Flfy$wEVk7AU;`k#=IQE*6DMWafTL|9-vT0qs{A3mmZGzOyN zcM9#Rgo7WgB_ujU+?Q@Ql?V-!E=jbypS+*chI&zA+C_3_@aJal}!Q54?qsL0In({Ly zjH;e+_SK8yi0NQB%TO+Dl77jp#2pMGtwsgaC>K!)NimXG3;m7y`W+&<(ZaV>N*K$j zLL~I+6ouPk6_(iO>61cIsinx`5}DcKSaHjYkkMuDoVl>mKO<4$F<>YJ5J9A2Vl}#BP7+u~L8C6~D zsk`pZ$9Bz3teQS1Wb|8&c2SZ;qo<#F&gS;j`!~!ADr(jJXMtcDJ9cVi>&p3~{bqaP zgo%s8i+8V{UrYTc9)HiUR_c?cfx{Yan2#%PqJ{%?Wux4J;T$#cumM0{Es3@$>}DJg zqe*c8##t;X(4$?A`ve)e@YU3d2Balcivot{1(ahlE5qg@S-h(mPNH&`pBX$_~HdG48~)$x5p z{>ghzqqn_t8~pY<5?-To>cy^6o~mifr;KWvx_oMtXOw$$d6jddXG)V@a#lL4o%N@A zNJlQAz6R8{7jax-kQsH6JU_u*En%k^NHlvBB!$JAK!cYmS)HkLAkm0*9G3!vwMIWv 
zo#)+EamIJHEUV|$d|<)2iJ`lqBQLx;HgD}c3mRu{iK23C>G{0Mp1K)bt6OU?xC4!_ zZLqpFzeu&+>O1F>%g-%U^~yRg(-wSp@vmD-PT#bCWy!%&H;qT7rfuRCEgw67V!Qob z&tvPU@*4*$YF#2_>M0(75QxqrJr3Tvh~iDeFhxl=MzV@(psx%G8|I{~9;tv#BBE`l z3)_98eZqFNwEF1h)uqhBmT~mSmT8k$7vSHdR97K~kM)P9PuZdS;|Op4A?O<*%!?h` zn`}r_j%xvffs46x2hCWuo0BfIQWCw9aKkH==#B(TJ%p}p-RuIVzsRlaPL_Co{&R0h zQrqn=g1PGjQg3&sc2IlKG0Io#v%@p>tFwF)RG0ahYs@Zng6}M*d}Xua)+h&?$`%rb z;>M=iMh5eIHuJ5c$aC`y@CYjbFsJnSPH&}LQz4}za9YjDuao>Z^EdL@%saRm&LGQWXs*;FzwN#pH&j~SLhDZ+QzhplV_ij(NyMl z;v|}amvxRddO81LJFa~2QFUs z+Lk zZck)}9uK^buJNMo4G(rSdX{57(7&n=Q6$QZ@lIO9#<3pA2ceDpO_340B*pHlh_y{>i&c1?vdpN1j>3UN-;;Yq?P+V5oY`4Z(|P8SwWq<)n`W@AwcQ?E9 zd5j8>FT^m=MHEWfN9jS}UHHsU`&SScib$qd0i=ky0>4dz5ADy70AeIuSzw#gHhQ_c zOp1!v6qU)@8MY+ zMNIID?(CysRc2uZQ$l*QZVY)$X?@4$VT^>djbugLQJdm^P>?51#lXBkdXglYm|4{L zL%Sr?2f`J+xrcN@=0tiJt(<-=+v>tHy{XaGj7^cA6felUn_KPa?V4ebfq7~4i~GKE zpm)e@1=E;PP%?`vK6KVPKXjUXyLS1^NbnQ&?z>epHCd+J$ktT1G&L~T)nQeExe;0Z zlei}<_ni ztFo}j7nBl$)s_3odmdafVieFxc)m!wM+U`2u%yhJ90giFcU1`dR6BBTKc2cQ*d zm-{?M&%(={xYHy?VCx!ogr|4g5;V{2q(L?QzJGsirn~kWHU`l`rHiIrc-Nan!hR7zaLsPr4uR zG{En&gaRK&B@lyWV@yfFpD_^&z>84~_0Rd!v(Nr%PJhFF_ci3D#ixf|(r@$igZiWw za*qbXIJ_Hm4)TaQ=zW^g)FC6uvyO~Hg-#Z5Vsrybz6uOTF>Rq1($JS`imyNB7myWWpxYL(t7`H8*voI3Qz6mvm z$JxtArLJ(1wlCO_te?L{>8YPzQ})xJlvc5wv8p7Z=HviPYB#^#_vGO#*`<0r%MR#u zN_mV4vaBb2RwtoOYCw)X^>r{2a0kK|WyEYoBjGxcObFl&P*??)WEWKU*V~zG5o=s@ z;rc~uuQQf9wf)MYWsWgPR!wKGt6q;^8!cD_vxrG8GMoFGOVV=(J3w6Xk;}i)9(7*U zwR4VkP_5Zx7wqn8%M8uDj4f1aP+vh1Wue&ry@h|wuN(D2W;v6b1^ z`)7XBZ385zg;}&Pt@?dunQ=RduGRJn^9HLU&HaeUE_cA1{+oSIjmj3z+1YiOGiu-H zf8u-oVnG%KfhB8H?cg%@#V5n+L$MO2F4>XoBjBeX>css^h}Omu#)ExTfUE^07KOQS znMfQY2wz?!7!{*C^)aZ^UhMZf=TJNDv8VrrW;JJ9`=|L0`w9DE8MS>+o{f#{7}B4P z{I34>342vLsP}o=ny1eZkEabr@niT5J2AhByUz&i3Ck0H*H`LRHz;>3C_ru!X+EhJ z6(+(lI#4c`2{`q0o9aZhI|jRjBZOV~IA_km7ItNtUa(Wsr*Hmb;b4=;R(gF@GmsRI`pF+0tmq0zy~wnoJD(LSEwHjTOt4xb0XB-+ z&4RO{Snw4G%gS9w#uSUK$Zbb#=jxEl;}6&!b-rSY$0M4pftat-$Q)*y!bpx)R%P>8 zrB&`YEX2%+s#lFCIV;cUFUTIR$Gn2%F(3yLeiG8eG8&)+cpBlzx4)sK?>uIlH+$?2 z9q9wk5zY-xr_fzFSGxYp^KSY0s%1BhsI>ai2VAc8&JiwQ>3RRk?ITx!t~r45qsMnj zkX4bl06ojFCMq<9l*4NHMAtIxDJOX)H=K*$NkkNG<^nl46 zHWH1GXb?Og1f0S+8-((5yaeegCT62&4N*pNQY;%asz9r9Lfr;@Bl${1@a4QAvMLbV6JDp>8SO^q1)#(o%k!QiRSd0eTmzC< zNIFWY5?)+JTl1Roi=nS4%@5iF+%XztpR^BSuM~DX9q`;Mv=+$M+GgE$_>o+~$#?*y zAcD4nd~L~EsAjXV-+li6Lua4;(EFdi|M2qV53`^4|7gR8AJI;0Xb6QGLaYl1zr&eu zH_vFUt+Ouf4SXA~ z&Hh8K@ms^`(hJfdicecj>J^Aqd00^ccqN!-f-!=N7C1?`4J+`_f^nV!B3Q^|fuU)7 z1NDNT04hd4QqE+qBP+>ZE7{v;n3OGN`->|lHjNL5w40pePJ?^Y6bFk@^k%^5CXZ<+4qbOplxpe)l7c6m%o-l1oWmCx%c6@rx85hi(F=v(2 zJ$jN>?yPgU#DnbDXPkHLeQwED5)W5sH#-eS z%#^4dxiVs{+q(Yd^ShMN3GH)!h!@W&N`$L!SbElXCuvnqh{U7lcCvHI#{ZjwnKvu~ zAeo7Pqot+Ohm{8|RJsTr3J4GjCy5UTo_u_~p)MS&Z5UrUc|+;Mc(YS+ju|m3Y_Dvt zonVtpBWlM718YwaN3a3wUNqX;7TqvAFnVUoD5v5WTh~}r)KoLUDw%8Rrqso~bJqd> z_T!&Rmr6ebpV^4|knJZ%qmzL;OvG3~A*loGY7?YS%hS{2R0%NQ@fRoEK52Aiu%gj( z_7~a}eQUh8PnyI^J!>pxB(x7FeINHHC4zLDT`&C*XUpp@s0_B^!k5Uu)^j_uuu^T> z8WW!QK0SgwFHTA%M!L`bl3hHjPp)|wL5Var_*A1-H8LV?uY5&ou{hRjj>#X@rxV>5%-9hbP+v?$4}3EfoRH;l_wSiz{&1<+`Y5%o%q~4rdpRF0jOsCoLnWY5x?V)0ga>CDo`NpqS) z@x`mh1QGkx;f)p-n^*g5M^zRTHz%b2IkLBY{F+HsjrFC9_H(=9Z5W&Eymh~A_FUJ} znhTc9KG((OnjFO=+q>JQZJbeOoUM77M{)$)qQMcxK9f;=L;IOv_J>*~w^YOW744QZ zoG;!b9VD3ww}OX<8sZ0F##8hvfDP{hpa3HjaLsKbLJ8 z0WpY2E!w?&cWi7&N%bOMZD~o7QT*$xCRJ@{t31~qx~+0yYrLXubXh2{_L699Nl_pn z6)9eu+uUTUdjHXYs#pX^L)AIb!FjjNsTp7C399w&B{Q4q%yKfmy}T2uQdU|1EpNcY zDk~(h#AdxybjfzB+mg6rdU9mDZ^V>|U13Dl$Gj+pAL}lR2a1u!SJXU_YqP9N{ose4 zk+$v}BIHX60WSGVWv;S%zvHOWdDP(-ceo(<8`y@Goy%4wDu>57QZNJc)f>Ls+}9h7 
z^N=#3q3|l?aG8K#HwiW2^PJu{v|x5;awYfahC?>_af3$LmMc4%N~JwVlRZa4c+eW2 zE!zosAjOv&UeCeu;Bn5OQUC=jtZjF;NDk9$fGbxf3d29SUBekX1!a$Vmq_VK*MHQ4)eB!dQrHH)LVYNF%-t8!d`@!cb z2CsKs3|!}T^7fSZm?0dJ^JE`ZGxA&a!jC<>6_y67On0M)hd$m*RAzo_qM?aeqkm`* zXpDYcc_>TFZYaC3JV>{>mp(5H^efu!Waa7hGTAts29jjuVd1vI*fEeB?A&uG<8dLZ z(j6;-%vJ7R0U9}XkH)1g>&uptXPHBEA*7PSO2TZ+dbhVxspNW~ZQT3fApz}2 z_@0-lZODcd>dLrYp!mHn4k>>7kibI!Em+Vh*;z}l?0qro=aJt68joCr5Jo(Vk<@i) z5BCKb4p6Gdr9=JSf(2Mgr=_6}%4?SwhV+JZj3Ox^_^OrQk$B^v?eNz}d^xRaz&~ zKVnlLnK#8^y=If2f1zmb~^5lPLe?%l}>?~wN4IN((2~U{e9fKhLMtYFj)I$(y zgnKv?R+ZpxA$f)Q2l=aqE6EPTK=i0sY&MDFJp!vQayyvzh4wee<}kybNthRlX>SHh z7S}9he^EBOqzBCww^duHu!u+dnf9veG{HjW!}aT7aJqzze9K6-Z~8pZAgdm1n~aDs z8_s7?WXMPJ3EPJHi}NL&d;lZP8hDhAXf5Hd!x|^kEHu`6QukXrVdLnq5zbI~oPo?7 z2Cbu8U?$K!Z4_yNM1a(bL!GRe!@{Qom+DxjrJ!B99qu5b*Ma%^&-=6UEbC+S2zX&= zQ!%bgJTvmv^2}hhvNQg!l=kbapAgM^hruE3k@jTxsG(B6d=4thBC*4tzVpCYXFc$a zeqgVB^zua)y-YjpiibCCdU%txXYeNFnXcbNj*D?~)5AGjL+!!ij_4{5EWKGav0^={~M^q}baAFOPzxfUM>`KPf|G z&hsaR*7(M6KzTj8Z?;45zX@L#xU{4n$9Q_<-ac(y4g~S|Hyp^-<*d8+P4NHe?~vfm z@y309=`lGdvN8*jw-CL<;o#DKc-%lb0i9a3%{v&2X($|Qxv(_*()&=xD=5oBg=$B0 zU?41h9)JKvP0yR{KsHoC>&`(Uz>?_`tlLjw1&5tPH3FoB%}j;yffm$$s$C=RHi`I3*m@%CPqWnP@B~%DEe;7ZT{9!IMTo1hT3Q347HJ&!)BM2 z3~aClf>aFh0_9||4G}(Npu`9xYY1*SD|M~9!CCFn{-J$u2&Dg*=5$_nozpoD2nxqq zB!--eA8UWZlcEDp4r#vhZ6|vq^9sFvRnA9HpHch5Mq4*T)oGbruj!U8Lx_G%Lby}o zTQ-_4A7b)5A42vA0U}hUJq6&wQ0J%$`w#ph!EGmW96)@{AUx>q6E>-r^Emk!iCR+X zdIaNH`$}7%57D1FyTccs3}Aq0<0Ei{`=S7*>pyg=Kv3nrqblqZcpsCWSQl^uMSsdj zYzh73?6th$c~CI0>%5@!Ej`o)Xm38u0fp9=HE@Sa6l2oX9^^4|Aq%GA z3(AbFR9gA_2T2i%Ck5V2Q2WW-(a&(j#@l6wE4Z`xg#S za#-UWUpU2U!TmIo`CN0JwG^>{+V#9;zvx;ztc$}@NlcyJr?q(Y`UdW6qhq!aWyB5xV1#Jb{I-ghFNO0 zFU~+QgPs{FY1AbiU&S$QSix>*rqYVma<-~s%ALhFyVhAYepId1 zs!gOB&weC18yhE-v6ltKZMV|>JwTX+X)Y_EI(Ff^3$WTD|Ea-1HlP;6L~&40Q&5{0 z$e$2KhUgH8ucMJxJV#M%cs!d~#hR^nRwk|uuCSf6irJCkSyI<%CR==tftx6d%;?ef zYIcjZrP@APzbtOeUe>m-TW}c-ugh+U*RbL1eIY{?>@8aW9bb1NGRy@MTse@>= za%;5=U}X%K2tKTYe9gjMcBvX%qrC&uZ`d(t)g)X8snf?vBe3H%dG=bl^rv8Z@YN$gd9yveHY0@Wt0$s zh^7jCp(q+6XDoekb;=%y=Wr8%6;z0ANH5dDR_VudDG|&_lYykJaiR+(y{zpR=qL3|2e${8 z2V;?jgHj7}Kl(d8C9xWRjhpf_)KOXl+@c4wrHy zL3#9U(`=N59og2KqVh>nK~g9>fX*PI0`>i;;b6KF|8zg+k2hViCt}4dfMdvb1NJ-Rfa7vL2;lPK{Lq*u`JT>S zoM_bZ_?UY6oV6Ja14X^;LqJPl+w?vf*C!nGK;uU^0GRN|UeFF@;H(Hgp8x^|;ygh? zIZx3DuO(lD01ksanR@Mn#lti=p28RTNYY6yK={RMFiVd~k8!@a&^jicZ&rxD3CCI! 
zVb=fI?;c#f{K4Pp2lnb8iF2mig)|6JEmU86Y%l}m>(VnI*Bj`a6qk8QL&~PFDxI8b z2mcsQBe9$q`Q$LfG2wdvK`M1}7?SwLAV&)nO;kAk`SAz%x9CDVHVbUd$O(*aI@D|s zLxJW7W(QeGpQY<$dSD6U$ja(;Hb3{Zx@)*fIQaW{8<$KJ&fS0caI2Py^clOq9@Irt z7th7F?7W`j{&UmM==Lo~T&^R7A?G=K_e-zfTX|)i`pLitlNE(~tq*}sS1x2}Jlul6 z5+r#4SpQu8h{ntIv#qCVH`uG~+I8l+7ZG&d`Dm!+(rZQDV*1LS^WfH%-!5aTAxry~ z4xl&rot5ct{xQ$w$MtVTUi6tBFSJWq2Rj@?HAX1H$eL*fk{Hq;E`x|hghRkipYNyt zKCO=*KSziiVk|+)qQCGrTYH9X!Z0$k{Nde~0Wl`P{}ca%nv<6fnYw^~9dYxTnTZB&&962jX0DM&wy&8fdxX8xeHSe=UU&Mq zRTaUKnQO|A>E#|PUo+F=Q@dMdt`P*6e92za(TH{5C*2I2S~p?~O@hYiT>1(n^Lqqn zqewq3ctAA%0E)r53*P-a8Ak32mGtUG`L^WVcm`QovX`ecB4E9X60wrA(6NZ7z~*_DV_e z8$I*eZ8m=WtChE{#QzeyHpZ%7GwFHlwo2*tAuloI-j2exx3#x7EL^&D;Re|Kj-XT- zt908^soV2`7s+Hha!d^#J+B)0-`{qIF_x=B811SZlbUe%kvPce^xu7?LY|C z@f1gRPha1jq|=f}Se)}v-7MWH9)YAs*FJ&v3ZT9TSi?e#jarin0tjPNmxZNU_JFJG z+tZi!q)JP|4pQ)?l8$hRaPeoKf!3>MM-bp06RodLa*wD=g3)@pYJ^*YrwSIO!SaZo zDTb!G9d!hb%Y0QdYxqNSCT5o0I!GDD$Z@N!8J3eI@@0AiJmD7brkvF!pJGg_AiJ1I zO^^cKe`w$DsO|1#^_|`6XTfw6E3SJ(agG*G9qj?JiqFSL|6tSD6vUwK?Cwr~gg)Do zp@$D~7~66-=p4`!!UzJDKAymb!!R(}%O?Uel|rMH>OpRGINALtg%gpg`=}M^Q#V5( zMgJY&gF)+;`e38QHI*c%B}m94o&tOfae;og&!J2;6ENW}QeL73jatbI1*9X~y=$Dm%6FwDcnCyMRL}zo`0=y7=}*Uw zo3!qZncAL{HCgY!+}eKr{P8o27ye+;qJP;kOB%RpSesGoHLT6tcYp*6v~Z9NCyb6m zP#qds0jyqXX46qMNhXDn3pyIxw2f_z;L_X9EIB}AhyC`FYI}G3$WnW>#NMy{0aw}nB%1=Z4&*(FaCn5QG(zvdG^pQRU25;{wwG4h z@kuLO0F->{@g2!;NNd!PfqM-;@F0;&wK}0fT9UrH}(8A5I zt33(+&U;CLN|8+71@g z(s!f-kZZZILUG$QXm9iYiE*>2w;gpM>lgM{R9vT3q>qI{ELO2hJHVi`)*jzOk$r)9 zq}$VrE0$GUCm6A3H5J-=Z9i*biw8ng zi<1nM0lo^KqRY@Asucc#DMmWsnCS;5uPR)GL3pL=-IqSd>4&D&NKSGHH?pG;=Xo`w zw~VV9ddkwbp~m>9G0*b?j7-0fOwR?*U#BE#n7A=_fDS>`fwatxQ+`FzhBGQUAyIRZ??eJt46vHBlR>9m!vfb6I)8!v6TmtZ%G6&E|1e zOtx5xy%yOSu+<9Ul5w5N=&~4Oph?I=ZKLX5DXO(*&Po>5KjbY7s@tp$8(fO|`Xy}Y z;NmMypLoG7r#Xz4aHz7n)MYZ7Z1v;DFHLNV{)to;(;TJ=bbMgud96xRMME#0d$z-S z-r1ROBbW^&YdQWA>U|Y>{whex#~K!ZgEEk=LYG8Wqo28NFv)!t!~}quaAt}I^y-m| z8~E{9H2VnyVxb_wCZ7v%y(B@VrM6lzk~|ywCi3HeiSV`TF>j+Ijd|p*kyn;=mqtf8&DK^|*f+y$38+9!sis9N=S)nINm9=CJ<;Y z!t&C>MIeyou4XLM*ywT_JuOXR>VkpFwuT9j5>667A=CU*{TBrMTgb4HuW&!%Yt`;#md7-`R`ouOi$rEd!ErI zo#>qggAcx?C7`rQ2;)~PYCw%CkS(@EJHZ|!!lhi@Dp$*n^mgrrImsS~(ioGak>3)w zvop0lq@IISuA0Ou*#1JkG{U>xSQV1e}c)!d$L1plFX5XDXX5N7Ns{kT{y5|6MfhBD+esT)e7&CgSW8FxsXTAY=}?0A!j_V9 zJ;IJ~d%av<@=fNPJ9)T3qE78kaz64E>dJaYab5uaU`n~Zdp2h{8DV%SKE5G^$LfuOTRRjB;TnT(Jk$r{Pfe4CO!SM_7d)I zquW~FVCpSycJ~c*B*V8?Qqo=GwU8CkmmLFugfHQ7;A{yCy1OL-+X=twLYg9|H=~8H znnN@|tCs^ZLlCBl5wHvYF}2vo>a6%mUWpTds_mt*@wMN4-r`%NTA%+$(`m6{MNpi@ zMx)8f>U4hd!row@gM&PVo&Hx+lV@$j9yWTjTue zG9n0DP<*HUmJ7ZZWwI2x+{t3QEfr6?T}2iXl=6e0b~)J>X3`!fXd9+2wc1%cj&F@Z zgYR|r5Xd5jy9;YW&=4{-0rJ*L5CgDPj9^3%bp-`HkyBs`j1iTUGD4?WilZ6RO8mIE z+~Joc?GID6K96dyuv(dWREK9Os~%?$$FxswxQsoOi8M?RnL%B~Lyk&(-09D0M?^Jy zWjP)n(b)TF<-|CG%!Vz?8Fu&6iU<>oG#kGcrcrrBlfZMVl0wOJvsq%RL9To%iCW@)#& zZAJWhgzYAq)#NTNb~3GBcD%ZZOc43!YWSyA7TD6xkk)n^FaRAz73b}%9d&YisBic(?mv=Iq^r%Ug zzHq-rRrhfOOF+yR=AN!a9*Rd#sM9ONt5h~w)yMP7Dl9lfpi$H0%GPW^lS4~~?vI8Z z%^ToK#NOe0ExmUsb`lLO$W*}yXNOxPe@zD*90uTDULnH6C?InP3J=jYEO2d)&e|mP z1DSd0QOZeuLWo*NqZzopA+LXy9)fJC00NSX=_4Mi1Z)YyZVC>C!g}cY(Amaj%QN+bev|Xxd2OPD zk!dfkY6k!(sDBvsFC2r^?}hb81(WG5Lt9|riT`2?P;B%jaf5UX<~OJ;uAL$=Ien+V zC!V8u0v?CUa)4*Q+Q_u zkx{q;NjLcvyMuU*{+uDsCQ4U{JLowYby-tn@hatL zy}X>9y08#}oytdn^qfFesF)Tt(2!XGw#r%?7&zzFFh2U;#U9XBO8W--#gOpfbJ`Ey z|M8FCKlWQrOJwE;@Sm02l9OBr7N}go4V8ur)}M@m2uWjggb)DC4s`I4d7_8O&E(j; z?3$9~R$QDxNM^rNh9Y;6P7w+bo2q}NEd6f&_raor-v`UCaTM3TT8HK2-$|n{N@U>_ zL-`P7EXoEU5JRMa)?tNUEe8XFis+w8g9k(QQ)%?&Oac}S`2V$b?%`DwXBgja&&fR@ zH_XidF$p1wA)J|Wk1;?lCl?fgc)=TB3>Y8;BoMqHwJqhL)Tgydv9(?(TBX)fq%=~C 
zmLj!iX-kn7QA(9snzk0LRf<%SzO&~IhLor6A3f*U^UcoAygRe!H#@UCv$JUP&vPxs zeDj$1%#<2T1!e|!7xI+~_VXLl5|jHqvOhU7ZDUGee;HnkcPP=_k_FFxPjXg*9KyI+ zIh0@+s)1JDSuKMeaDZ3|<_*J8{TUFDLl|mXmY8B>Wj_?4mC#=XjsCKPEO=p0c&t&Z zd1%kHxR#o9S*C?du*}tEHfAC7WetnvS}`<%j=o7YVna)6pw(xzkUi7f#$|^y4WQ{7 zu@@lu=j6xr*11VEIY+`B{tgd(c3zO8%nGk0U^%ec6h)G_`ki|XQXr!?NsQkxzV6Bn1ea9L+@ z(Zr7CU_oXaW>VOdfzENm+FlFQ7Se0ROrNdw(QLvb6{f}HRQ{$Je>(c&rws#{dFI^r zZ4^(`J*G0~Pu_+p5AAh>RRpkcbaS2a?Fe&JqxDTp`dIW9;DL%0wxX5;`KxyA4F{(~_`93>NF@bj4LF!NC&D6Zm+Di$Q-tb2*Q z&csGmXyqA%Z9s(AxNO3@Ij=WGt=UG6J7F;r*uqdQa z?7j!nV{8eQE-cwY7L(3AEXF3&V*9{DpSYdyCjRhv#&2johwf{r+k`QB81%!aRVN<& z@b*N^xiw_lU>H~@4MWzgHxSOGVfnD|iC7=hf0%CPm_@@4^t-nj#GHMug&S|FJtr?i z^JVrobltd(-?Ll>)6>jwgX=dUy+^n_ifzM>3)an3iOzpG9Tu;+96TP<0Jm_PIqof3 zMn=~M!#Ky{CTN_2f7Y-i#|gW~32RCWKA4-J9sS&>kYpTOx#xVNLCo)A$LUme^fVNH z@^S7VU^UJ0YR8?Oy$^IYuG*bm|g;@aX~i60%`7XLy*AYpYvZ^F^U(!|RW z*C!rJ@+7TGdL=nNd1gv^%B+;Fcr$y)i0!GRsZXRHPs>QVGVR{9r_#&Qd(wL|5;H;> zD>HUw=4CF++&{7$<8G@j*nGjhEO%BQYfjeItp4mPvY*JYb1HKd!{HJ9*)(3%BR%{Pp?AM&*yHAJsW({ivOzj*qS!-7|XEn6@zo z3L*tBT%<4RxoAh>q{0n_JBmgW6&8hx?kL(_^k%VL>?xjAyrKBmSl`$=V|SK}ELl}@ zd|d0eo#RfG`bw9SK3%r4Y+rdvc}w}~ixV%tqawbdqvE-WcgE+BUpxMT%F@btm76MG zn=oQRWWuTm+a{dy)Oc2V4yX(@M{QAkx>(QB59*`dLT`Pz3Lsj9iB=HSHAiCq()ns|Cr)1*c605Cx}3V&x}Lg?b+6Q?)z7Kl zQh&1Hx`y6JY-Cwvd*ozeps}a1xAA0CR+Da;+O(i)P1C;SjOI}Dtmf6tPqo-Bl`U78 zv$kYgPntPp@G)n1an9tEoL*Vumu9`>_@I(;+5+fBa-*?fEx=mTEjZ7wq}#@Gd5_cW z!mP{N=yqEntDo)|>oy6{9cu+-3*GTnmb^`O0^FzRPO^&aG`f@F_R*aQ_e{F+_9%NW z4KG_B`@X3EVV9L>?_RNDMddA>w=e0KfAiw5?#i1NFT%Zz#nuv(&!yIU>lVxmzYKQ` zzJ*0w9<&L4aJ6A;0j|_~i>+y(q-=;2Xxhx2v%CYY^{} z^J@LO()eLo|7!{ghQ+(u$wxO*xY#)cL(|miH2_ck2yN{mu4O9=hBW*pM_()-_YdH#Ru{JtwJ^R2}3?!>>m1pohh zrn(!xCjE0Q&EH1QK?zA%sxVh&H99cObJUY$veZhQ)MLu-h%`!*G)s$2k;~+A z)Kk->Ri?`oGDEJEtI*wijm(s5f$W78FH{+qBxiU{~kq((J3uK{m z$|C8K#j-?hm8H@x%VfFqpnvu@xn1s%J7uNZC9C99a<_b1J|mx%)$%!6gPU|~<@2&m zz99GDp`|a%m*iggvfL;4%X;~WY>)@!tMWB@P`)k?$;0x9JSrRI8?s3rlgH(o@`OAo zn{f*gZ#t2u6K??hx|aElOM`Xd0t+SAIUEHvFw%?Wsm$s zUXq{6UU?a>Nc@@Xlb_2k9M1Ctr<#+O?yd}rv z_wu&=_t$!Yngd@N_AUj}T; z#*Ce|%XZr_sQcsWcsl{pCnnj+c8ZNIMmx<;w=-g$Q>BU;9k;w|zQ;4!W32Xg2Cd?{ zvmO3kuKQ^Hv;o>6ZHP8ZJ2`4~Bx?N;cf<0fi=!*G^^WzbTF3e$b&d^qqB{>nqLG81 zs94bBh%|Vj+hLu=!8(b9brJ>ZBns9^6s(gdSVyP9qnu2_I{Sg8j-rloG6{d`De5We zDe5WeY3ga}Y3ga}Y3ga}Y3ga}Y3ga}d8y~6o|k%F>UpW>rJk31Ug~+N=cS&HdOqs; zsOO`ek9t1p`Kafko{xGy>iMbXr=FjBxZMYc8a#gL`Kjlpo}YSt>iMY`pk9DF0qO*( z6QE9jIsxhgs1u-0kUBx8D@eT{^@7w3QZGooAoYUO3sNscy%6<6)C*BBM7L`dk$Xk%6}eZQXgo#!75P`>Uy*-B{uTLGUy*-B{uTLGUy*-B{uTLGqo1h^Sl?5fQHy z3@Rvsm7*022$ABYeX&1l3tg19UZPd{Y7=d(ZPnK*Z!eHN`F)=`XUP&m>-+!xexJ{O zH?uQy&YWkSnR(`!XP)Po6M+eWU=cP6lF%}8|&%ddqyBm-N z{Tbxb7T>Ub5&Qa-3;A|IxTbl@!uc_wt`W~KsKouq5?nAIk=G#~L%w9miksK%HQQQ{ zzfTavPj6Ut{ruBkb_@}Og}BCEUNL`N3kwKu2*ToWl=rNhzhYtg&RxKL@zsJLZD?6_ z)6MT)KY6VnEc-dCU%z(Yf<p=6vpVK=EbUm|aev2Sol<97XHI8v zXGLdiXI~kpyFL~$jshU}17x8WWT8XXk=5bpsP3rg7y`(n zIwk?~f{vDsO&zVBtW(#S)#>Rh>8$RIb`I$r)_Ha3q|SMrEuEV>TRR^k$lafGpY2}M zVffuAzdQcBB_By=ogbJ#NcZG;vOPAB$)oq^in@!GqD0Z(i~d^lRneb|eqZ!a(Je(c z7p*8-T(qcYUeVm5=AxNJ(~Bk+jV>Bi)L0ZPiWI)7_7<@IzyG1}62u2Jz_o}yTA=aj zhtMB^C}pn}Kx-Z(Js2;+fVfHxf(`LpH3)XZht(iB1fdxBC(c1#}I^JNDoFl zLJb1)9itFNdk&aVx@ONUs!x zPPD6&a9)ELICrKYjb}Qu5OR>d9kB-ixC{3pEezwwFAxLw z&Rt0VQV>2yL_q+xojbvUAiRb6BoBh{HsUip2*Nvvf5n3!v?KmI4}$Qn!2a9DgCM+z z*ujG!{06a$2SIoraVZai@Bv~!4+1!nz(8B*M*d+UA_}P=+@vm6KQemx|IZ&{%9ngF z6Ta1luR8(*pAzxKdcc-Q9yHt_1fFL?)u3YrS@cW)NIdu6+TkMQK-BSSzbUXicV+ z7LJQfeo#IlfbN;MP!5Nh#M-dlp!XH~1I+J>hHIkui9{peklW?<)dWOeu~{^D4PL#| zD|wXm^y>OyVQ0aZap5CH^Ox`c<=T>=rVnB_>dwaQEggHy@vmD3>0bzs8&jBFKYXyA 
z-4;{Y^=v0QH|FM{{VloGGiwhoyXCuqL+fHywXyxPx4yD?S+u!2$5A=EDHezTzc_1^ z$B8G1@Tg7lxULP-7V(4vy6^s)Rm!i)R}n9>dqa`hnlfLpA;5gadZ)u}W=@CenE2(o zg9q0IDl1=D`S|^^4>Hy=gPFMtS+t4OT5HM-I`k92rd^Ug8!~3%Oq=!oi6f_)jfpIynerv~O}wgE zdN%R*EO+keNVFoyJvl1fXv~m)D%p*RiPr3#)hjD9neu_m!lbUMtEAt2Y*Aj8D_t8ZI( zOLJt{`Yi{Vn)Yv5Kdf%{+O_MY7e-ty516`UNd5XvcO08O{n#Cw*4GbNGj)JG8eJ@Q zzbuTBcc6cbBu_DWIP5GH!@THQWpxD<2Gj#x+Ol-P&stk*TFHxBwc zkvJeWBhj@X7L&I0#BsWw7=GzRdEABL@;Hz!%_2nV2boGO$>*rR`I`keR*_V}tZ1jV zxD1pW3422>U9bGVy??I2skAr?3Y@IfSs*s2<`M@|bC=$eb9TLQ$KZ#x_MPtP==*wV`EOH3 z&P~?T11}||T=Rc&Tiu<}Jh`;r`|NR|C7MA*OAN~iMnsRfH?*pM8{gs&flJGQr>@Q4eq1ZnwMC4)3ed| zy64ZIe|{ar5b(>Gz(DuUU*zvXsm~f_TF@bu+v0Jhy(ggfg-Il*vU9i&7^09XY-!SfL3is01oMw=+<0u`OONSvkBOPN(&Wm24|CRYu-M^_clmsRI@E6Vi2O5HsTfyq*CrnqKf^Q?^^DGDyGgj_z>R@RGLqE=-UPD8ENsq-cmp9W_2*&+8QgS3U&jTUppg-(K4_w-?!PX4|`0`BFKde7Se8I9ECN%{OeuH_8Iw7?TfQyu)l%()Epc{}6<1$YOh- z|8f9Vl1~KYle{b};mf=k$cS%!U7q*@JNlM$pW{t-H1TOD?_eIam4tLw3GwF~1Y!^} z-^pU_O~Rp$VzfUCGm>aX_+WolK8mx-xbhLZ_2^Lo!uLz(6ceySkD<-zYsi{Mfr(ov z#FbE?s7~UVCf3vF3;+(ZkIsFxckbN1S|p0f;jh1D)4o>XJI|lr8JCY^h ztaba7r!;0sJXLH4rvy)(Om}Y87%d{sy9Lg>vji`oM*&dp^kGAR3ZmE#f(J%w!x(w& zkquVy#3L>DK7W2E@!(TWZciMzBrACynRNbns`l3H*oC+BGYd$1gSCkjicJg;Nn6Tq+tPaP&9fbY?p?QG^)g^U)lME^EH5{Xn5>uv zRcCthbQ3u};0JAd480i?u0oGmp+&$LC09d8?@i28h<&IgX@UAk7AC2l%fh|#a@+M! zfArZ$PhSrfnPJ}gd#3;WR-WwYFs1EHGw~m>xhIYNTjk9tkH>CS+BsXRyyLCatKYhV z=iXOp=plB7epAvwo90GbZk9fS%miMU!@N3cCWFcb`Wh%}qHdb5;Ezvj9kn(22c<|0 z=1V-Dyns6Zqr#F}I4tlo4og=W#e!(?V?L;mSnG&Y%ZANJ!lZJ0`6o$%5A z6$~H5XaXsLdWjWxZQz|tiVbWb#S^g@zi}?kx0O^PaR5sksL{h8B#Osc6^pS-6y!1t z-KG_c0I5_?WXjWVB77`C0E0X9N$$~z7hXOe1-sAMkd&T~4x>?4OukyeKg!$Ss|6H5 zgB~bOk%}NSOT8$!b!AJRrG^W~W3lvW_(!D??CLo`Fkp;@bdj&gQl!RTR&3Ba+^!HQ zcM>BYMw~rfP*6Cvkbcl06VyMyHCmL{3Z@kl7Saz|0P59!h_)Coo>-$bXk4NXvs9SR z6HF}jXQj^+Q;59=KB5$x&J7=^@jchhecIDX(a}&ek zaq&bvo@jmCXf_+^N9}Lu{ej0(tmnmo;H@o#*0YK+AJaokW}(q74zR({(gF=9v%Bqb zTXDIqP_I|+xK6n-JKxmLVqq&Pno8`~vU{gw^{-X79}C<(l=ZU*%$d@sUAF2xQ?9`< zbf_y*`R9)Y%p5AFv(pbMKjVFXev^KNx?$@i#U6B+n8{|*!U|=?=#N^iqzg!Xot4&{ znled^`m-4O&AK1Ey~P=(w7d~D{ntD@Q886Ci0Q79B3AjGaW@>;{k>V6ZlCj%e6;Ps z=ylQZG=pRcU$tiBwC&?(8N%gKL%zEp(_#oIci%RC%KWbF^QX0NGgLlcYIBh)+oT4{yo9ax;B(`_Zh3EE_-KeH0}s1>WWM1zi|8vM8yb;}!f zhO(RiZ!uU31~)ERJQg?5Gr9D$Xe*Xm5Hp*qC}v^p;w z*N{S;G6K<5kG?@5T>?=z=@LN2k=}Xf-`uBNVd4PSA2h4_n67NfNuN0j;swsG4xaJg z7L*Pbj#Ew^=PZz3RJW3j!b0VUbGT$csKSDU|GP+LcF9pJrBsJ=9lH5vrwS)Ti|K!5=NyGy*{4rGE8dDr?fg=uqmT+G`HiEHcE>4gPhlm$92*;Zd%Ul{ zpmt$35ulqOKA6%j;t{EBA`5A6KB6PRvexkL+I708Ne}>H@zhp9`it*R{N>86N@>x- z3&+I=F1F%dHA>wNv_XcqkjF)D`$D=XZK*6u*orDEi^MOB_}+k3N>3)%@GB4CHv#nt z?eKeKAnG4CEE<Mp%Hx^%i-A(-muYYU(^2Z)~Z|7t3D;wYa+m6+L8#*+-c=@Wm zW509ThTq(o7(us|Eq@Gk^yo;icf3SH!mP#63-wZru;#W47kX(!x~`LE(6$}Vi^47N zi~60;0vj61428fB)@M?iHc3)I^p`;w$?chLv7dAF#F^sX6=eK$oe@it)27o_nti2wO;QUQ$BiYO?c(b z$y08CxwPs&TMntO#Z)Evb|%dVLKxVcG&vO(48(u&^5bWy0(G0UOiUy_ndu-2YWw~_EjnngQRBr9$MJm7l7k%1~8!AYCYpA$= zT8QnrQCZI0jvv?|#|imD02riJ?se-8q?N#qnQE_vj^0^p))|_lA|{W!SiMfXd;0cd z^)uNLWtSoQ>R~g6)n^ngUOcz3fSs&O;xNh6oW$WSsNtI47tQYQuoc6~YGD7wM5eJI zeD(vM0&uBb_>k(Q2OsnXw=bliQaNbYG3DtbF3J~TOsU_U;tY z<)?53WlkyY6HG4WZb4hH%kt7RPE|NKt$?YRQdX67>@#HyaYvH4pnf0A{>X7t(qyZ__dbhJ@DNS8g3wYhwr*rrmI;~1cYLv&N zili4|Knm6RtQ`GL?L(L0OWR9m5@8WgvY|ynH;~r?jS)Uvj;65>V{deEnD}#ewk9Iy zCf9fBXLQlI0$x2AkJ*d7qcy02{DKo|6UG&+pQ&SiIoz6vG^GdTW$-wL91iKx7v;xf`du&bMkZ0 zDWdmMHLyAu+rpSOw8C-)tR1@fFQA+MV((ry8G4I&Tz;T0q~q_+N!MMs!}?LK-r=mm?8D1TwQF%q;k^xz(Wtad5na1(q_0unK2 zkStczCfz_zWDaN)WH<4v-qlWy>udvx^L@eL!MvsSw8|EPUet-{vRSrEc2}BPXYm(g zv&%;%@khy65o!*F$CYR6Tka6`CZj9kVuwa~skwI_5y2mv$! 
z-JPnCPwkP(WTGLx++|&IKk2l%j*I$4T^mSmmP?up==#je0EHj9kky8pq-br}Stz=7 z&PWt_T*W<`T`RY}k@M25_=EQqzV@1>--zX-JXZOU(U)SQmzEE*jjyE6N& zx3gD`g#u^M0q@C^d5_&5A2e%fG&3G|OuB1C{8!cAjgMLGKJ!NQ@~h*cS7iSRZSJu_ z*h#iZZFAC8V@Xlu@NclqH;?>(4VU1(nZoUN}no& zm0_%$RVIri4)D5v!PgFGvP-RS2?GsUQT^PuXEyuvBk%v?9m|r}*nI83TRc0zJo0Si?GC#&vwQ=pj z{(yY4dP&pJ#?dy)Z7*cxo|-))T{LB}?+ui*oxgTu%L8SfBjWJcz}k0RyiJ}3 zi9fP{qoBZ{yp7*GW3&qKHMb2i?*RCJMWOK*m~Rk+iJu%R;mBt|lIY3;x!b|l66o`x z`45*y3ngC#D~3c4n^lEKl(9+_i!&Pio`U~!+3e0Qy#@Y8qfZo9k%k;xMd|;#&g`*? ziGM18l!|S({bY9KbkrhkVMa&VVSlx?HPe-CYPAK*o=JZH`+*V;C0TDDYsM1yCu58e|qLKI0(-%dwMusZ?{BW7uS~!p1WyU$dRrq$O+%%@ti!fDs$>k;3swe zOt@YCLJng`F_`?_nZc|t4(Q-K(WDO*>fA!8NseMOmUNMb>J5dmojfPNFy$|D_4y+w z-n8bC)<@RdG;w6UKDYOU#E4C6r_8FnI)g#>?)Vygkk?ECJTFS%MHY_o-(WN5>=8Ty|-h$Id&pc$D*Epw+{chQY zVN0{;l?XE0BA_j8*p~%_Iwt+j4c|pi=htTtn&Xg^!Fba}B5}uC`aP`ThOF?hIrm0;S6zLX+Np z0?ny%7Y?+LA@d>U!o}(U7{rfO#X6ylmv_je&z+2lizmuw_4`LL_<14{$byGpU)@TQACXCAB4nM?DW ziH(jrM`EKhPs)lb``Ih(6=gq`!ciXC3xQYiu;mt4wpG~`%eBw>XpTKMrtGq2yDV&Z z^M+>e7s`K_gN_PErsFZ;;`~2 zxwpvUkUoIjF*>TDLTs)8#{sSoT)4jm+2IDD18GGdc8~qP4wI&ldEw*jB7dYNy}zcB zsYX6>3}==4Z2$O$Prmx(!twrWJ+jv6{@T)piXv+Uq$4mEGyt`DGy|H?+ zGWgPESV)nOk97V1H|+LPtUv4j&!6MB@(p(9Z{Us93WF!S2mZkFuxREfe*o?xJe82Hr(qPEN8kx^iW9sEp$L7-p|E;n{Bi2 zvy#pyDGQF%e0CsNhBZGa_()+(I@b@B`Xs+6I7`zaOxE6$NHT* zrMyS70w-*kkEuph1({|uFApmalndC(z?%Yh)sn30QSn=)9wlT9|C z7p2S$i#{I84rOMZ7Y$Aq8qVMy;FR~sdx&Q;gCBc0e918)>Lw2fe-y3~?3Do>6aMtW zAO2}V$AI0tk^b}X{UV7&Bo#vg zBX?XFBhgMM!+9hbyiUpI_gM!s_^O2AlM~9THqYDch&A4pbv{t~WkI7~c{#t)599Uu z_wI}BjD=tjmfOnnPyIZ%RB0I-t7pwc{bQAr*BEwIPFB9?yj{6J#@4pK3+4xbmE)uG zG_n(ezP#vpcsoK9*ucoN;kIkT&Ld86et47m;G~ zADaJ({++k8wK3)X_IEjdOamWr%G1$5johcE6eLl^xF-lmP-O#TQRiMXI9BBL+MBqb z$ZZAvL{;fK7~&{RjvLrAbB5Kl!kjUk1*R`wF>U!~L!L!BWOz2;JTS&e@6zX4-pI1q zvXm&xkkciDEQ>nhBQvN0($Y`$rWUiqW?nz8b%OGo%fByE%(RvouU67$v8m4TLZ_pE zF;UVF-)LZRHKriVX9L%&d%Swi|U!2ZYn*45pNP zL?u}1GUcH7DWu^^pURnjYvSw7@0B~*)CsNQ*!rw2XXcHjXI{>*WTXRS5vL|99LjUE z*x$ZT5toGdv^MF?kTd!IpS*khFnN*g-0ClbWK2@INQzm5SAyFsgwR2B+9pE8;d1M8 zh{4F?%ALw{sB*of)ZF6A;+Tk;nfqQ*(m$X2k}F58JQO0#uwVLs&Cpu6e7f@XG!x5Q z=_*oo==9IZXyW$4b>R zK%~1PJAV=663FfjXf0})6$gWek%4{&k+fC@pI)4R36hHqo9d|8mznqmV{H7?;%dn( zv#e+1TPJ{}9(I(6LXttB?Rt6Y7wqryq@0Gv%w!qVgd0{)1GKZ7 z_4$_9T{fGG#WM_9X;P-`;Tdcyts_`V!2=G#PZjG53ne{FiM!b$u0V$)UbF9_2Iup= zbN7CD3uo@^VP&O!Xs`0Qrq;6WyY<7pa~0d^*H{_rcX5q61lU=ebHS6->EQ0G1RP=z zB%@k!Iz5$y0^rK$*tG_51ndwpx9;N_GZl2=IpyqYr%$Hf+!tJle5AradOe3rN;i)5 z3sA3J0V)?#mt-~7zm@ZnWItyK_X)eGr!VOZc!5AX zg{27FCGFSYGQfHS@vBgby7Y+QtwLlj(oO|`bV5)M+YIS{A`qgHjz(x3P{@jKyaIQk z*ou`!NkJBcdrQPml!uajy#dxoH!fl8<_a}k-d7J>`sX&KSsE=)7=Yke64a&T>5G}k zm7SJ7&DB(2kQR{o4bU^)qP2y^KFJ)&G>^2VH+lkDp)8r{D`YV(C)aJaXXvx^<#~Ej zx!G)&k^nocByC=)a(kt^zOj537v}RzN(0lyn zm~46@Lq8e(mJGL{_(r#PZGQU5oD92cDom>?lx<@iqp(3Vn#9!wB~3+;4-HuvOw7pe zxy33mGfi@p*$Q$B@(Z){j2VpfQtV1cJKg<_=6;TxbemmD&v5&l9z%tcDe2@ApUWgI zu?79IsFzJ?rV@kEL@G|wo(S_WXAWyNSHHT0Cn>zQRC1Z5LK}eI<#0_C*SWMJTQQyC z!A1g#c7c@cy)S`i<-@6R41~5Gq2`hd@a6vKnygO}8+fA|y9EOoG_pf5#O%XL4JnBn zv9VgF$X}#eaexcMI)~%4R_vPmvX|DntAJ1@LNTAcW{f$II_`Jn^y0m!pXaL+nns4xzAU+VF$c{P{P+RK+NU6f1Q zYTj>1Zt8K8Rx46lQ$qe;yfiyTuJ3&~$tT`*c|0z+$HN>f-Q%W=*%GyeuMSrf{Vh;L zx0K?5hwjJ+F7u>UJ*FS<1U%kK?=)sMySzvnx4Q~T!r>B6P-iYupXF6RtPzDtLPY+V z+ziQ$I9CgF&z+ETryz}H; zf!Q~V8hPq=_Nu9AWOM$gc~cG@nYds?-i)i7T(ehQ%ju-P`)hfv{1f0tyB*jFpuh$5 zp`)yHz!ryp8E|pKXD}R!!od;O{028Pt!Rb;ci4a0m$tLJ|323iC@Szphi)Bu-P|F{ zABGNX=P8yqbm&%-VQIT^8x<*t4rM#7{DFD4Ky86#p47VSCsL~NkC z4~9!UBu?cAGa4IbG{&SKIYWWM!a&H`HHx+i&%p%~*BfU5JamLMh&7!;6|{6$p+~H4 
zavao?;+=cyg~3X#etsC1aSgoe_63*(XKsubddY1ipF;7(km5m;qUFbS#~zWwf7D)OqeL!D+ezfdi7Z40<)zxj4r6mcIpk{o62e1-9tt} zB8dr$q(@<+x|&9l-05kR0ZlG1f2BXEQl=*PNoBQy&IMT7t#iJg+?&i z(t=RMM1Mc`+ado9cXm|oG+Is8^lDSdhtFm^jOkL7GFTnT=$7+u)z>^NLg8)mK8%_{Gm zf;s@Z#nbp>mDk6vhh+wK8&%IimTZ`C&f!uE)Kc8(`I7pwpu^+dugUt7Rn)3=K$(lf zdF0|;>r1KcVl}7-U>Bkeu2+FIo;I%Ju?dw0s-{yRGVdEYf1}6F-i8`s-BvpWt+D#t zR0VJ0#g5|Ur8t_Tb(RON;aCI67!~gYk6LgM-bF|fhpfSq$HWNMLO{LP`6?`cR7^B} zd<^)WQx6RpjY0}kz=FHGHyJKs3EyK<5~!z^xdECFEi6?WTl)RCumKkisA@nxNsNyW zI1MmWL5>YXHoakka%evSoe9|q1co&{$z^EIp-ZvMBVR^_mwjJ;@ig~P5o=Yq6LL?1 zCQiHheFmo#EYm&rs0z{__S6IVgsz|OF0s+!HA=l|(pgJMANTYZU+yD-f4Qm$UV}1< zjfa0s<#&Sy-3p1+Yu9l#wWLEQgB?F05TAd9L z3Q0E6h@%nayB*5GciH?M?A)4@6%t1Cw3@Ly~}3oNPOqEN2!mgKX09o z^rl*X_FZaMCdVP5k^Uz1xEvj(Wj!J7I_e4Pm@+m`xn2+|vVA`Fx$sPZ5@$yKNm@kF1+Q4>cU8pW*FUVaEn&urJfoWAG`zW{W}K_ z-jV$4RjKmL;)CqrcvoTa{-z%sBvMgnn)JoAYWLMn>PW1uszin{GxgL8Q3XN)_ZzIl z2J@0u@{S}!042UvJ>adVM-|<~*~-eEdbA^91dG(Zm)5f~{*+94mJkr zP3Y@1&u=m5@`+jCgfS)cOa%@xg94;2yvm)i#9400DMNMCN2D8A1eiyVBKbx=*9VFq z17HP%hfbI|k=W>fc*`&gcU~^*NL{0?m$7`>k9pgW8TS>0+c}^+N&oFY&L^^K6 z6R}W;|H)H|?ABYdMieQ#3TnOCdYy6;O3RNxUV1~hirUTo*BgW+jhp&QeULn>HZEyL zp_Ry)ob6#s7fK{ws7JqmmzOqd5VeZ~k~|J}5*Q0|6jRPvoG~Yh39dk0pTo}OjKzzp z=*lu_ohyflb#lW*L}&$>;Yv>^0GEAs$7+{CzW!GhaczY+)f;$ zB>i%#oI?YzD|PDd?xzY^e^AWtjfzjhHo)B~{7VxDu)MYN6$~#Lpac6j7D?VYEzl!V z`lrmV%+$)0`7OR+0md&WSl~giAnv>S>AM%i7bx%HHu^0~$dbP+KSkCqyFriLW1$p= z%8r~t&{<{JVPnrmP9i_t$5>I*!;2Qb_1JAiMNenx?XTKvverJdVdKIzR=xQ<<^l5d zeHs1lf2e)Y;)ff(Y@fBte4kmiu35ZcII9_)YY-LSb zc>*1?!t5+`(4i!}f@6i~Dx1wx~S9Nu`hxbm1Cn_4qy3FNC?n9%a_bu>#r&YX&zx{%*L`kWNWPLi`2`d}6 ziJYg_dSOALOWv33L#8Ia+=B-ETvGcZkFRRP5H8BK z$=)FEN$LbO?z0!D5BNIMyJqwNRjIZ=)~ileQWm(Z&P)~_01CgXze!IDXw;RxYhvei z;sg4;w14UJ37x_1qh%5ppdH?WL|L$T>WOprQ70_#vCS2c`m)XJ+~%_SNX6#fRZ}Br z&6~D)#*EF=XpUTpLlMq*z&EBZ98zhG?Dl+h{GQ>}g11{k04f}c%@ngcGopd#q;X!9C z=q+q19yF>PNIn#(8&i)IL8S;*AH6}zixiGH)70V8;Nl(-MZ!j48?QFs0}R3Q>`Gcno>A@aRC*P*9qwX?+$2H zzCK8QkWG2~HKZCgXDkQK#w$Oh8@mU<5sP50$3R8p-85g}!p8du_BtRBbuBjsxSXn4 zz~zRvmXz^UgI7Eeh>Tg99%{I4R_-HnZhl%cr;k}$UnMUcQ&)+q2EgjLbWC=UXHnzq zyY#beeEMcNOA?okscm*OoVdj+B*} zHlUGVD@=kA=?}^C2(Ci3JklEhR6CaR83ZQU1z;&u4OL)hD1(A{Ar3W~@5`*HQ{@io z+Y!k-wqQ-ztp2fffAUUXR6L7+JC-6O9jUlT#Eib#fUdyQOpcGB$RqCK4?!3!0L zvt0b^>PX4pYVSPX6%efxpoES5fy6IS?q7V+Y{uJ8ay)k6^d?V(z8J4ZfSnCTQ2bt) ze`;XQlI~%77K^!`xkUL>`4z$t?|~@xW1{msi_%ef{F&bFrv0U3OF6A!3n}X z7$wTIDjig)3HXQzD$VC`nTJc8J#tS2$Q+Xm`zE}VNE14xEqvy5ZJ@eiYo@TuDQmFE zRq}0{=n5@ONV7dcvxXS!Dn<7&P%Z3k*5`$ zUt!j=3&rpmfcJo0W_9G{+FVl-=l?ozpe;AgVO=xWa_dx^-sYI&!0*&sErXShZU~y{ zM%HD};WkIPAw54(f!FR-z$NZEHfsDvhsU1lw3piN7_a8}qqHqs#$vf*LgKabtA z0B)b$g~i!x>^1d-8#|$lkT=p?LOU4V&h)2vt!~6 ztFFjpOt(l1`o`_H(X{!td&#HqS)X1~Q_0^&EOhP;}*a(7OaYz&N_ z;R&omD8Wn;RVn4 ze6S;}Xwi!OoCk>T)4H4MAEPdKbKrHp*!R^$85}txZk=@eLgq8KZB87v^tY_CSj1-U zgn7?wQxcMK@-9Nb>VIds!$aXej}+OU;W9 z(vu)>EoR36awH!8KnqVJPxJ9=HKu!bmY#<;2G(Z|r~4atAtd3Gz6)=MrZU|xtKs6k zWEqMJ5SD3Wsl4`#kc%|Ihg8jD88G%BP0!FZR;9W9xL!5!)n75hBJoqY1L`B zrtM1?(#z6Erf*39hq2B$$M~@Eu<@&mK*qX^XEQoXxu!Lyw=)Bo_n1TG?^@C<0m~xG zz{3ATeWSt?ONM?w!^lM>_+% zbmTfFIqq|O*Kyntcl@X0AI^MdlXIQ(Jy)6QLDxBViF=Xz3HOO?A={B%o;@l1iR_oN z&t`v}W6T+v)0%T4SI!-mdnC`87t8xe-skz*`NQ*97c>_fD|o$7EL>N3swlr`LeUYA z%TwdI!SjsgjOTCO67Ll6J>H*q|5jXGJg4~a;xoQ9-w@w2-=n@0zRyeYOClxnN_LjC zm!_2tDqU2%r}Q(ND%nzY!k_OS?qBCWQ7)7ZEWe@rNcqqv_{SprSmSGU=(9=c zWimXY@LpbJe3qJtrOO8Mq-(Ua9cl80rZRECB_?q=EmVsSuU)$~fd9kP@0DAH|KKs7mtT(l z@W8L-27Em!5N_hRg~Cn3LR?*g-xx}cLd$1iUS2JXMy(Tt3BpvAyBe@=5EdaU1^mT$ zW(vwL##<$B;I#ztWHra7L70x(XX3erK4D!BX+SSn-xdQ;ujgj)cH9IESMfeb#c2|6 zg^FPhrb|%rX5o5XehpfwJ`sSgUp25_ftD=?Oe(Vo?W49YK#vE6S{~}q?;-H7zVQ9` 
zt?YZG`o6kWpl<;EeFH|h1>?U|!}=y%CHzKbHjzzYli3tDl}%&Q*$g(5HM3c4HoJyh%dTT{*jzRb=DY>$db~z%AzQ>2 zvn6aPTgH~-9KZ^;lC5Gb>_)bl-NbHYx3D#AEnCOdvs>A1Yy-QUZDe<_P3%s#ncc;< zu)Enk>|S;syPrM4zQZ15TiG`D5Nt-<*~9D+_9)wdfA;Yhdz|gUy0e?@VNbH}vZvTy z_C2eZR~ldb$-Z>vlpOSdWpTve#Cyv{)3%> zmHQ|7M+>jApF#@%8T&aq$xg9fusA!-UT1HxGwhe_SM1kV;of3zvv*iKdzZb(exv7X zDX2yv!!0Y9R##tDO>wBYIvEGGJim|YVJ%;y#kE=-(c-8U*J*LR7GI^tp^<7_J5nBT z%j#7;6RB1!iB_wHqt(372n`9u{61oi1Y(W^VqQ67UO8f3IbvQpVh(Rab&xj(u?8oo z!3k<`g1j-fufYpy@PZn=paw6f!3$~dLK?h~1}~(+3u*8|8a$kMK&OtV4r%a08oZDO zFRZ}}Yw&QagO?9$aKaj#um&fr!3k?{!Wx_!4Ni>)r$&QQqv2Jf!Ku-nuhE{b(Vnl> zp0CxOuhpKf)t<-ei8)@i8k|}UpIQxGtp=}FgBQ`@MKm}O4NgRZ6Vc#AG&m6rPDFzf z(cnZiI8hC+s0J^p!Ha6}q8hxY1~00?i)!$q8oW9UUY!Q7PJ>sc!K>5Y)oJkRG(REOx>!3#0L5;418eIo9x(;e|9n|PLsL^#$qwAnX*FlZ0gBm>tHF^$e^c>Xa zIjGTdP^0IdM$bWwo`V`a2g7QA1U0%2YIGgc=sBp-b5Nt>phm|*jedhQYCi@wIu2^| z8`S7GsL^jwqu-!Lzd?lBXP@~_VM!&&`I<7&Dj)NK<2Q@kl zYIGdb=s2j+aZsb<(Q#0tzL5+@s8XX5UIu2@d z9MtGIsL^pyqvN1P$3cybgBl$NH98JzbR5*^IH=KaP^06ZM#n*oj)NK<2b1($ug-@c z-fc?!0jq@mmf*;mp~HAItX7S*+z6f<8KtN;7*eAeHHz>k#2=^)MM>6RliwO!E(re{ DlhOCh diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff old mode 100755 new mode 100644 index 2cc3e4852a5a42e6aadd6284e067b66e14a57bc7..9e612858f802245ddcbf59788a0db942224bab35 GIT binary patch literal 23424 zcmY&eV{m0%u#Iioo_J#0nb?@vwry)-+qNe*Z>))v8{5gt_uj9!t5)^yb-JtjRGrhi zYInOUNJxNyf_yKX01)K=WP|Si>HqEj|B{eUl?MR<)%<1&{(~)D+NPwKxWqT-@~snp zg9KCz1VTZDiS?UH`PRk1VPM{29cgT9=D?!Wc_@}qzggFv;gb@2cJQAYWWtpEZ7?y@jSVqjx${B5UV@SO|wH<<0; z{><1KdVI%Ki}>~<`46C0AggwUwx-|QcU;iiZ{NZu`ur>hd*|Hb(|6veERqxu=b@5Bab=rqptGxd{QJg!4*-i_$sES~)AB46}Fjg|ea#e@?J}z%CUJ zOsLWRQR1#ng^sD)A4FDuY!iUhzlgfJh(J@BRqd&P#v2B`+saBx>m+M&q7vk-75$NH%T5pi%m z5FX?`2-5l53=a&GkC9^NZCLpN5(DMKMwwab$FDIs?q>4!!xBS}75gX_5;(luk;3Vl zLCLd5a_8`Iyz}K}+#RMwu6DVk3O_-}n>aE!4NaD*sQn`GxY?cHe!Bl9n?u&g6?aKm z-P8z&;Q3gr;h`YIxX%z^o&GZZg1=>_+hP2$$-DnL_?7?3^!WAsY4I7|@K;aL<>OTK zByfjl2PA$T83*LM9(;espx-qB%wv7H2i6CFsfAg<9V>Pj*OpwX)l?^mQfr$*OPPS$ z=`mzTYs{*(UW^ij1U8UfXjNoY7GK*+YHht(2oKE&tfZuvAyoN(;_OF>-J6AMmS5fB z^sY6wea&&${+!}@R1f$5oC-2J>J-A${@r(dRzc`wnK>a7~8{Y-scc|ETOI8 zjtNY%Y2!PI;8-@a=O}+{ap1Ewk0@T`C`q!|=KceX9gK8wtOtIC96}-^7)v23Mu;MH zhKyLGOQMujfRG$p(s`(2*nP4EH7*J57^=|%t(#PwCcW7U%e=8Jb>p6~>RAlY4a*ts=pl}_J{->@kKzxH|8XQ5{t=E zV&o`$D#ZHdv&iZWFa)(~oBh-Osl{~CS0hfM7?PyWUWsr5oYlsyC1cwULoQ4|Y5RHA2*rN+EnFPnu z`Y_&Yz*#550YJwDy@brZU>0pWV^RxRjL221@2ABq)AtA%Cz?+FG(}Yh?^v)1Lnh%D zeM{{3&-4#F9rZhS@DT0E(WRkrG!jC#5?OFjZv*xQjUP~XsaxL2rqRKvPW$zHqHr8Urp2Z)L z+)EvQeoeJ8c6A#Iy9>3lxiH3=@86uiTbnnJJJoypZ7gco_*HvKOH97B? zWiwp>+r}*Zf9b3ImxwvjL~h~j<<3shN8$k-$V1p|96I!=N6VBqmb==Bec|*;HUg?) 
z4!5#R*(#Fe)w%+RH#y{8&%%!|fQ5JcFzUE;-yVYR^&Ek55AXb{^w|@j|&G z|6C-+*On%j;W|f8mj?;679?!qY86c{(s1-PI2Wahoclf%1*8%JAvRh1(0)5Vu37Iz z`JY?RW@qKr+FMmBC{TC7k@}fv-k8t6iO}4K-i3WkF!Lc=D`nuD)v#Na zA|R*no51fkUN3^rmI;tty#IK284*2Zu!kG13!$OlxJAt@zLU`kvsazO25TpJLbK&;M8kw*0)*14kpf*)3;GiDh;C(F}$- z1;!=OBkW#ctacN=je*Pr)lnGzX=OwgNZjTpVbFxqb;8kTc@X&L2XR0A7oc!Mf2?u9 zcctQLCCr+tYipa_k=;1ETIpHt!Jeo;iy^xqBES^Ct6-+wHi%2g&)?7N^Yy zUrMIu){Jk)luDa@7We5U!$$3XFNbyRT!YPIbMKj5$IEpTX1IOtVP~(UPO2-+9ZFi6 z-$3<|{Xb#@tABt0M0s1TVCWKwveDy^S!!@4$s|DAqhsEv--Z}Dl)t%0G>U#ycJ7cy z^8%;|pg32=7~MJmqlC-x07Sd!2YX^|2D`?y;-$a!rZ3R5ia{v1QI_^>gi(HSS_e%2 zUbdg^zjMBBiLr8eSI^BqXM6HKKg#@-w`a**w(}RMe%XWl3MipvBODo*hi?+ykYq)z ziqy4goZw0@VIUY65+L7DaM5q=KWFd$;W3S!Zi>sOzpEF#(*3V-27N;^pDRoMh~(ZD zJLZXIam0lM7U#)119Hm947W)p3$%V`0Tv+*n=&ybF&}h~FA}7hEpA&1Y!BiYIb~~D z$TSo9#3ee02e^%*@4|*+=Nq6&JG5>zX4k5f?)z*#pI-G(+j|jye%13CUdcSP;rNlY z#Q!X%zHf|V)GWIcEz-=fW6AahfxI~y7w7i|PK6H@@twdgH>D_R@>&OtKl}%MuAQ7I zcpFmV^~w~8$4@zzh~P~+?B~%L@EM3x(^KXJSgc6I=;)B6 zpRco2LKIlURPE*XUmZ^|1vb?w*ZfF}EXvY13I4af+()bAI5V?BRbFp`Sb{8GRJHd* z4S2s%4A)6Uc=PK%4@PbJ<{1R6+2THMk0c+kif**#ZGE)w6WsqH z`r^DL&r8|OEAumm^qyrryd(HQ9olv$ltnVGB{aY?_76Uk%6p;e)2DTvF(;t=Q+|8b zqfT(u5@BP);6;jmRAEV057E*2d^wx@*aL1GqWU|$6h5%O@cQtVtC^isd%gD7PZ_Io z_BDP5w(2*)Mu&JxS@X%%ByH_@+l>y07jIc~!@;Raw)q_;9oy@*U#mCnc7%t85qa4? z%_Vr5tkN^}(^>`EFhag;!MpRh!&bKnveQZAJ4)gEJo1@wHtT$Gs6IpznN$Lk-$NcM z3ReVC&qcXvfGX$I0nfkS$a|Pm%x+lq{WweNc;K>a1M@EAVWs2IBcQPiEJNt}+Ea8~WiapASoMvo(&PdUO}AfC~>ZGzqWjd)4no( ziLi#e3lOU~sI*XPH&n&J0cWfoh*}eWEEZW%vX?YK!$?w}htY|GALx3;YZoo=JCF4@ zdiaA-uq!*L5;Yg)z-_`MciiIwDAAR3-snC4V+KA>&V%Ak;p{1u>{Lw$NFj)Yn0Ms2*kxUZ)OTddbiJM}PK!DM}Ot zczn?EZXhx3wyu6i{QMz_Ht%b?K&-@5r;8b076YDir`KXF0&2i9NQ~#JYaq*}Ylb}^ z<{{6xy&;dQ;|@k_(31PDr!}}W$zF7Jv@f%um0M$#=8ygpu%j(VU-d5JtQwT714#f0z+Cm$F9JjGr_G!~NS@L9P;C1? z;Ij2YVYuv}tzU+HugU=f9b1Wbx3418+xj$RKD;$gf$0j_A&c;-OhoF*z@DhEW@d9o zbQBjqEQnn2aG?N9{bmD^A#Um6SDKsm0g{g_<4^dJjg_l_HXdDMk!p`oFv8+@_v_9> zq;#WkQ!GNGfLT7f8m60H@$tu?p;o_It#TApmE`xnZr|_|cb3XXE)N^buLE`9R=Qbg zXJu}6r07me2HU<)S7m?@GzrQDTE3UH?FXM7V+-lT#l}P(U>Fvnyw8T7RTeP`R579m zj=Y>qDw1h-;|mX-)cSXCc$?hr;43LQt)7z$1QG^pyclQ1Bd!jbzsVEgIg~u9b38;> zfsRa%U`l%did6HzPRd;TK{_EW;n^Ivp-%pu0%9G-z@Au{Ry+EqEcqW=z-#6;-!{WA z;l+xC6Zke>dl+(R1q7B^Hu~HmrG~Kt575mzve>x*cL-shl+zqp6yuGX)DDGm`cid! 
znlnZY=+a5*xQ=$qM}5$N+o!^(TqTFHDdyCcL8NM4VY@2gnNXF|D?5a558Lb*Yfm4) z_;0%2EF7k{)i(tTvS`l5he^KvW%l&-suPwpIlWB_Za1Hfa$@J!emrcyPpTKKM@NqL z?X_SqHt#DucWm<3Lp}W|&YyQE27zbGP55=HtZmB(k*WZA79f##?TweCt{%5yuc+Kx zgfSrIZI*Y57FOD9l@H0nzqOu|Bhrm&^m_RK6^Z<^N($=DDxyyPLA z+J)E(gs9AfaO`5qk$IGGY+_*tEk0n_wrM}n4G#So>8Dw6#K7tx@g;U`8hN_R;^Uw9JLRUgOQ?PTMr4YD5H7=ryv)bPtl=<&4&% z*w6k|D-%Tg*F~sh0Ns(h&mOQ_Qf{`#_XU44(VDY8b})RFpLykg10uxUztD>gswTH} z&&xgt>zc(+=GdM2gIQ%3V4AGxPFW0*l0YsbA|nFZpN~ih4u-P!{39d@_MN)DC%d1w z7>SaUs-g@Hp7xqZ3Tn)e z7x^sC`xJ{V<3YrmbB{h9i5rdancCEyL=9ZOJXoVHo@$$-%ZaNm-75Z-Ry9Z%!^+STWyv~To>{^T&MW0-;$3yc9L2mhq z;ZbQ5LGNM+aN628)Cs16>p55^T^*8$Dw&ss_~4G5Go63gW^CY+0+Z07f2WB4Dh0^q z-|6QgV8__5>~&z1gq0FxDWr`OzmR}3aJmCA^d_eufde7;d|OCrKdnaM>4(M%4V`PxpCJc~UhEuddx9)@)9qe_|i z)0EA%&P@_&9&o#9eqZCUCbh?`j!zgih5sJ%c4(7_#|Xt#r7MVL&Q+^PQEg3MBW;4T zG^4-*8L%s|A}R%*eGdx&i}B1He(mLygTmIAc^G(9Si zK7e{Ngoq>r-r-zhyygK)*9cj8_%g z)`>ANlipCdzw(raeqP-+ldhyUv_VOht+!w*>Sh+Z7(7(l=9~_Vk ztsM|g1xW`?)?|@m2jyAgC_IB`Mtz(O`mwgP15`lPb2V+VihV#29>y=H6ujE#rdnK` zH`EaHzABs~teIrh`ScxMz}FC**_Ii?^EbL(n90b(F0r0PMQ70UkL}tv;*4~bKCiYm zqngRuGy`^c_*M6{*_~%7FmOMquOEZXAg1^kM`)0ZrFqgC>C%RJvQSo_OAA(WF3{euE}GaeA?tu5kF@#62mM$a051I zNhE>u>!gFE8g#Jj95BqHQS%|>DOj71MZ?EYfM+MiJcX?>*}vKfGaBfQFZ3f^Q-R1# znhyK1*RvO@nHb|^i4Ep_0s{lZwCNa;Ix<{E5cUReguJf+72QRZIc%`9-Vy)D zWKhb?FbluyDTgT^naN%l2|rm}oO6D0=3kfXO2L{tqj(kDqjbl(pYz9DykeZlk4iW5 zER`)vqJxx(NOa;so@buE!389-YLbEi@6rZG0#GBsC+Z0fzT6+d7deYVU;dy!rPXiE zmu73@Jr&~K{-9MVQD}&`)e>yLNWr>Yh8CXae9XqfvVQ&eC_;#zpoaMxZ0GpZz7xjx z`t_Q-F?u=vrRPaj3r<9&t6K=+egimiJ8D4gh-rUYvaVy zG($v+3zk5sMuOhjxkH7bQ}(5{PD3Mg?!@8PkK&w>n7tO8FmAmoF30_#^B~c(Q_`4L zYWOoDVSnK|1=p{+@`Fk^Qb81Xf89_S`RSTzv(a4ID%71nll%{Wad$!CKfeTKkyC?n zCkMKHU#*nz_(tO$M)UP&ZfJ#*q(0Gr!E(l5(ce<3xut+_i8XrK8?Xr7_oeHz(bZ?~8q5q~$Rah{5@@7SMN zx9PnJ-5?^xeW2m?yC_7A#WK*B@oIy*Y@iC1n7lYKj&m7vV;KP4TVll=II)$39dOJ^czLRU>L> z68P*PFMN+WXxdAu=Hyt3g$l(GTeTVOZYw3KY|W0Fk-$S_`@9`K=60)bEy?Z%tT+Iq z7f>%M9P)FGg3EY$ood+v$pdsXvG? zd2q3abeu-}LfAQWY@=*+#`CX8RChoA`=1!hS1x5dOF)rGjX4KFg!iPHZE2E=rv|A} zro(8h38LLFljl^>?nJkc+wdY&MOOlVa@6>vBki#gKhNVv+%Add{g6#-@Z$k*ps}0Y zQ=8$)+Nm||)mVz^aa4b-Vpg=1daRaOU)8@BY4jS>=5n#6abG@(F2`=k-eQ9@u# zxfNFHv=z2w@{p1dzSOgHokX1AUGT0DY4jQI@YMw)EWQ~q5wmR$KQ}Y;(HPMSQCwzu zdli|G?bj(>++CP)yQ4s6YfpDc3KqPmquQSxg%*EnTWumWugbDW5ef%8j-rT#3rJu? 
z)5n;4b2c*;2LIW%LmvUu6t1~di~}0&Svy}QX#ER|hDFZwl!~zUP&}B1oKAxIzt~so zb!GaJYOb#&qRUjEI1xe_`@7qv_-LggQ$JE8+{ryT4%ldwC5ete+{G3C#g@^oxfY3#F zcLlj(l2G8>tC<5XWV|6_DZQZ7ow?MD8EZ9mM2oV~WoV-uoExmbwpzc6eMV}%J_{3l zW(4t2a-o}XRlU|NSiYn!*nR(Sc>*@TuU*(S77gfCi7+WR%2b;4#RiyxWR3(u5BIdf zo@#g4wQjtG3T$PqdX$2z8Zi|QP~I^*9iC+(!;?qkyk&Q7v>DLJGjS44q|%yBz}}>i z&Ve%^6>xY<=Pi9WlwpWB%K10Iz`*#gS^YqMeV9$4qFchMFO}(%y}xs2Hn_E}s4=*3 z+lAeCKtS}9E{l(P=PBI;rsYVG-gw}-_x;KwUefIB@V%RLA&}WU2XCL_?hZHoR<7ED zY}4#P_MmX(_G_lqfp=+iX|!*)RdLCr-1w`4rB_@bI&Uz# z!>9C3&LdoB$r+O#n);WTPi;V52OhNeKfW6_NLnw zpFTuLC^@aPy~ZGUPZr;)=-p|b$-R8htO)JXy{ecE5a|b{{&0O%H2rN&9(VHxmvNly zbY?sVk}@^{aw)%#J}|UW=ucLWs%%j)^n7S%8D1Woi$UT}VuU6@Sd6zc2+t_2IMBxd zb4R#ykMr8s5gKy=v+opw6;4R&&46$V+OOpDZwp3iR0Osqpjx))joB*iX+diVl?E~Q zc|$qmb#T#7Kcal042LUNAoPTPUxF-iGFw>ZFnUqU@y$&s8%h-HGD`EoNBbe#S>Y-4 zlkeAP>62k~-N zHQqXXyN67hGD6CxQIq_zoepU&j0 zYO&}<4cS^2sp!;5))(aAD!KmUED#QGr48DVlwbyft31WlS2yU<1>#VMp?>D1BCFfB z_JJ-kxTB{OLI}5XcPHXUo}x~->VP%of!G_N-(3Snvq`*gX3u0GR&}*fFwHo3-vIw0 zeiWskq3ZT9hTg^je{sC^@+z3FAd}KNhbpE5RO+lsLgv$;1igG7pRwI|;BO7o($2>mS(E z$CO@qYf5i=Zh6-xB=U8@mR7Yjk%OUp;_MMBfe_v1A(Hqk6!D})x%JNl838^ZA13Xu zz}LyD@X2;5o1P61Rc$%jcUnJ>`;6r{h5yrEbnbM$$ntA@P2IS1PyW^RyG0$S2tUlh z8?E(McS?7}X3nAAJs2u_n{^05)*D7 zW{Y>o99!I9&KQdzgtG(k@BT|J*;{Pt*b|?A_})e98pXCbMWbhBZ$t&YbNQOwN^=F) z_yIb_az2Pyya2530n@Y@s>s>n?L79;U-O9oPY$==~f1gXro5Y z*3~JaenSl_I}1*&dpYD?i8s<7w%~sEojqq~iFnaYyLgM#so%_ZZ^WTV0`R*H@{m2+ zja4MX^|#>xS9YQo{@F1I)!%RhM{4ZUapHTKgLZLcn$ehRq(emb8 z9<&Nx*RLcS#)SdTxcURrJhxPM2IBP%I zf1bWu&uRf{60-?Gclb5(IFI*!%tU*7d`i!l@>TaHzYQqH4_Y*6!Wy0d-B#Lz7Rg3l zqKsvXUk9@6iKV6#!bDy5n&j9MYpcKm!vG7z*2&4G*Yl}iccl*@WqKZWQSJCgQSj+d ze&}E1mAs^hP}>`{BJ6lv*>0-ft<;P@`u&VFI~P3qRtufE11+|#Y6|RJccqo27Wzr}Tp|DH z`G4^v)_8}R24X3}=6X&@Uqu;hKEQV^-)VKnBzI*|Iskecw~l?+R|WKO*~(1LrpdJ? z0!JKnCe<|m*WR>m+Qm+NKNH<_yefIml z+x32qzkNRrhR^IhT#yCiYU{3oq196nC3ePkB)f%7X1G^Ibog$ZnYu4(HyHUiFB`6x zo$ty-8pknmO|B9|(5TzoHG|%>s#7)CM(i=M7Nl=@GyDi-*ng6ahK(&-_4h(lyUN-oOa$` zo+P;C4d@m^p9J4c~rbi$rq9nhGxayFjhg+Rqa{l#`Y z!(P6K7fK3T;y!VZhGiC#)|pl$QX?a)a9$(4l(usVSH>2&5pIu5ALn*CqBt)9$yAl; z-{fOmgu><7YJ5k>*0Q~>lq72!XFX6P5Z{vW&zLsraKq5H%Z26}$OKDMv=sim;K?vsoVs(JNbgTU8-M%+ zN(+7Xl}`BDl=KDkUHM9fLlV)gN&PqbyX)$86!Wv!y+r*~kAyjFUKPDWL3A)m$@ir9 zjJ;uQV9#3$*`Dqo1Cy5*;^8DQcid^Td=CivAP+D;gl4b7*xa9IQ-R|lY5tIpiM~9- z%Hm9*vDV@_1FfiR|Kqh_5Ml0sm?abD>@peo(cnhiSWs$uy&$RYcd+m`6%X9FN%?w}s~Q=3!pJzbN~iJ}bbM*PPi@!E0eN zhKcuT=kAsz8TQo76CMO+FW#hr6da({mqpGK2K4T|xv9SNIXZ}a=4_K5pbz1HE6T}9 zbApW~m0C`q)S^F}B9Kw5!eT)Bj_h9vlCX8%VRvMOg8PJ*>PU>%yt-hyGOhjg!2pZR4{ z=VR_*?Hw|aai##~+^H>3p$W@6Zi`o4^iO2Iy=FPdEAI58Ebc~*%1#sh8KzUKOVHs( z<3$LMSCFP|!>fmF^oESZR|c|2JI3|gucuLq4R(||_!8L@gHU8hUQZKn2S#z@EVf3? zTroZd&}JK(mJLe>#x8xL)jfx$6`okcHP?8i%dW?F%nZh=VJ)32CmY;^y5C1^?V0;M z<3!e8GZcPej-h&-Osc>6PU2f4x=XhA*<_K*D6U6R)4xbEx~{3*ldB#N+7QEXD^v=I z+i^L+V7_2ld}O2b-(#bmv*PyZI4|U#Q5|22a(-VLOTZc3!9ns1RI-? zA<~h|tPH0y*bO1#EMrsWN>4yJM7vqFZr?uw$H8*PhiHRQg1U9YoscX-G|gck+SSRX!(e7@~eeUEw+POsT;=W9J&=EV`cUc{PIg_#TQVGnZsQbCs7#Q-)v#BicxLw#Fb?#)8TYbu zN)5R=MI1i7FHhF|X}xEl=sW~`-kf;fOR^h1yjthSw?%#F{HqrY2$q>7!nbw~nZ8q9 zh{vY! 
z%i=H!!P&wh z7_E%pB7l5)*VU>_O-S~d5Z!+;f{pQ4e86*&);?G<9*Q$JEJ!ZxY;Oj5&@^eg0Zs!iLCAR`2K?MSFzjX;kHD6)^`&=EZOIdW>L#O`J zf~$M4}JiV}v6B-e{NUBGFgj-*H%NG zfY0X(@|S8?V)drF;2OQcpDl2LV=~=%gGx?_$fbSsi@%J~taHcMTLLpjNF8FkjnjyM zW;4sSf6RHaa~LijL#EJ0W2m!BmQP(f=%Km_N@hsBFw%q#7{Er?y1V~UEPEih87B`~ zv$jE%>Ug9&=o+sZVZL7^+sp)PSrS;ZIJac4S-M>#V;T--4FXZ*>CI7w%583<{>tb6 zOZ8gZ#B0jplyTbzto2VOs)s9U%trre`m=RlKf{I_Nwdxn(xNG%zaVNurEYiMV3*g| z``3;{j7`UyfFrjlEbIJN{0db|r>|LA@=vX9CHFZYiexnkn$b%8Rvw0TZOQIXa;oTI zv@j;ZP+#~|!J(aBz9S{wL7W%Dr1H)G-XUNt9-lP?ijJ-XEj1e*CI~-Xz@4(Xg;UoG z{uzBf-U+(SHe}6oG%;A*93Zb=oE>uTb^%qsL>|bQf?7_6=KIiPU`I|r;YcZ!YG7y~ zQu@UldAwz$^|uoz3mz1;An-WVBtefSh-pv<`n&TU3oM!hrEI?l@v8A4#^$4t&~T32 zl*J=1q~h+60sNc43>0aVvhzyfjshgPYZoQ(OOh>LbUIoblb@1z~zp?))n?^)q6WGuDh}gMUaA9|X z3qq-XlcNldy5==T4rq*~g@XVY!9sYZjo#R7 zr{n)r5^S{9+$+8l7IVB*3_k5%-TBY@C%`P@&tZf>82sm#nfw7L%92>nN$663yW!yt zhS>EfLcE_Z)gv-Y^h1;xj(<4nD4GY{C-nWUgQc9cMmH{qpa!uEznrGF^?bbJHApScQ$j>$JZHAX80DdXu z--AMgrA0$Otdd#N9#!cg2Z~N8&lj1d+wDh+^ZObWJ$J)_h(&2#msu>q0B$DEERy{1 zCJN{7M@%#E@8pda`@u!v@{gcT3bA*>g*xYLXlbb&o@1vX*x+l}Voys6o~^_7>#GB| z*r!R%kA9k%J`?m>1tMHB9x$ZRe0$r~ui}X}jOC)9LH=Po*2SLdtf3^4?VKnu2ox&mV~0oDgi` z;9d}P$g~9%ThTK8s}5ow2V4?(-lU*ed8ro|}mU}pk% z;bqB0bx3AOk<0Joeh}Vl@_7Po&C`Cg>>gff>e7fu41U3Ic{JQu1W%+!Gvz3GDO2ixKd;KF6UEw8F_cDAh08gB>@ zaRH2Q96sBJ>`4aXvrF0xPtIWoA1pPsRQtU~xDtnEfTJnl{A9u5pR^K8=UdNq%T8F$)FbN> zgK+_(BF#D>R>kK!M#OT~=@@}3yAYqm33?{Bv?2iBr|-aRK0@uapzuXI)wE0=R@m^7 zQ`wLBn(M*wg!mgmQT1d!@3<2z>~rmDW)KG0*B4>_R6LjiI0^9QT8gtDDT|Lclxppm z+OeL6H3QpearJAB%1ellZ6d*)wBQ(hPbE=%?y6i^uf%`RXm*JW*WQ%>&J+=V(=qf{ zri~yItvTZbII+7S0>4Q0U9@>HnMP$X>8TqAfD(vAh};2P{QK)ik`a6$W$nG<{bR2Ufd!^iE z#1K58$gW!xpeYHeehuhQCXZ9p%N8m zB+l~T_u-Ycr!U>!?xu!!*6rNxq37{`DhMMfY6NpD3Jw zkYQDstvt30Hc_SaZuuMP2YrdW@HsPMbf^Y9lI<9$bnMil2X7`Ba-DGLbzgqP>mxwe zf1&JkDH54D3nLar2KjJ3z`*R+rUABq4;>>4Kjc2iQEj7pVLcZYZ~pteAG4rm1{>PQy=!QiV5G|tVk)53 zP?Azw+N)Yq3zZ`dW7Q9Bq@Y*jSK0<1f`HM;_>GH57pf_S%Ounz_yhTY8lplQSM`xx zU{r-Deqs+*I~sLI$Oq`>i`J1kJ(+yNOYy$_>R3Jfi680<|^u#J@aY%Q>O zqfI~sCbk#3--^zMkV&Yj0D(R^rK}+_npgPr_4^kYuG=pO%$C_7v{s@-{M-P@RL3^<`kO@b=YdKMuccfO1ZW# zeRYE%D~CMAgPlo?T!O6?b|pOZv{iMWb;sN=jF%=?$Iz_5zH?K;aFGU^8l7u%zHgiy z%)~y|k;Es-7YX69AMj^epGX#&^c@pp+lc}kKc`5CjPN4Z$$e58$Yn*J?81%`0~A)D zPg-db*pj-t4-G9>ImW4IMi*v#9z^9VD9h@9t;3jMAUVxt=oor+16yHf{lT|G4 zya6{4#BxFw!!~UTRwXXawKU4iz$$GMY6=Z8VM{2@0{=5A0+A#p6$aT3ubRyWMWPq9 zCEH5(Il0v4e4=Yxg(tDglfYAy!UpC>&^4=x7#6_S&Ktds)a8^`^tp6RnRd{KImB^o z2n=t#>iKx<*evmvoE{+fH#@WXGWs$)Uxrtf?r>AaxV0?kf0o@oDboJ6z0cgP@A$;k>SK1UqC?Q_ zk_I?j74;}uNXhOf_5ZxQSgB4otDEb9JJrX1kq`-o%T>g%M5~xXf!2_4P~K64tKgXq z&KHZ0@!cPvUJG4kw-0;tPo$zJrU-Nop>Uo65Pm|yaNvKjhi7V1g98;^N1~V3% zTR>yWa+X2FJ_wpPwz3i^6AGwOa_VMS-&`*KoKgF2&oR10Jn6{!pvVG@n=Jk@vjNuY zL~P7aDGhg~O9G^!bHi$8?G9v9Gp0cmekYkK;(q=47;~gI>h-kx-ceM{ml$#8KI$4ltyjaqP zki^cyDERloAb)dcDBU4na9C(pfD{P@eBGA}0|Rb)p{ISqi60=^FUEdF!ok{Gs;vb) zfj9(#1QA64w*ud^YsN5&PeiI>c`VioE8h)e}W%S9NMA55Gs zrWL6l+@3CKd@8(UQLTwe12SGWMqRn+j)QZRj*g)Xua)%ayzpqs{pD(WWESJYL3{M$ z%qkpM`jFoqLYVv6{IbCkL?fEiJj$VG=$taup&RL9e{s(Sgse2xVJlw0h74EXJKt2eX|dxz{->0)3W`JN7Bv!rLvRZc z0tAOZ2yVe4g9iq826qXAg`f!*+}(o1;1FDb>kKexumFS40KvK0yH1_@Z=LgWZ+}(Y zwYsa;OLz6tTA%gS=>8$=Z7pLh>|K2QElL)E=Q*(n*H`8R`8={-@4mTD-SWBOYRxV? 
zmF(-rJB8^Wlp?319rTrh^?QEP?|Msxrv?WbJ-+id+V#F2Y4(JPJ6U9bv+U1cIIH^W z)lg$_=g^Ma>2~Pyd_YOAv29Cb-U6DJO?NxnW7~QP*SmYi*vdUVuW#LWQ_u0`hymZi zaQS3Nb^4`ro$>0G%zbXmr5|D|iq0R<;S@?kr0j5Ruq87-Z1>crx%EzVZ9#U;{?}ti zW2W%*9MQg3Nbh%Ti6LhDd|-aFSgXoPG`mHlUU1iCHr>ru>DX?W_#13(`u*!Plu2OP z6jk=2>BC0l)aw;HCmxoYD1i4b%m$1`DYC_^L~ zIEAnFcHvad=-aO3(_MI=9#`z6-9*_!&$?<%meb5;jGd5Qp=MGf z6BD{%`L#TAOq%z%@*ib95Ey7NbUF=BlszVk3Iu3imD&*91N-ij%hW?W@~2TtdHTfP z#n0@Xd7X8Dyu36n{k#PwQ~T~X7mAO^cNV+z<HO@3X-# z_@rAn$k~(l@kciCC;&Qd*fWRI>=;fL{UPlciNDWyj$bX<#r^(r;EE8wwUVQm&7~QY zCXRj!**r^xybAEPq>h3W$uvI1j=yNIyzkE_D7fpGw)OV{U*Uwm{xB;mEg2(|y|ICd zMdQVqzMb-=XM6|E-a9kNh)^9lY`-DjhhHD1w5lufRcy+QLgJ47!fFne86#F; zX{ufroVBEZJOY?rDo!;Te6aOZ^1SO!dYRxQ*2njyA~dCWawn)>!*k7~>8Ikt&e*0>>V5ZbO|*1+2LFOqVe zXHb!aMk03^h%&9L8GMy7UDI2Kev>V@(R}*Iu6x+!Hn4~D@wj`P%#Hdbf(lK{+DD7f zJ&(v*mhn_e(R$^5L#bM^^Q@-!*b!l|+Xrb(q*MRFJYnrE7*xko!SJOy9LngR2|q5k zY`Ioiu+YBfzF{Labszk-E#*BYQk>$()=xWEGZRKwY)*UxP}0dGuPLZOkNJDI9Hy zFjfwiK6RjhH#rHW#B0(MW}i%V`943<6@Z*Nd^JEP5uZonXm=u%AM>{H^U@&Jy*i0s za_Da^xI6pMtXzHc{e~_ZcnKP*;=YL2Z^RmzDl{dJTk7*}E_h*NvgnhnxVKB59Duh~ zqouS_WoOR*{UvUw_K#OWz;gMracr%8>QQ&V*jv!8)ho;U8}9~8EU{N<=Z_gR%IpMT zbkePUG_afm=#|iIfFmdqkpLMGxY5D$`?I}&T7>TexU@v zkBx09kG)O;09ckj#(_Uov6vv{{HOcr-%H#DUQ@*GzF8Zh{iSM13%fuB%>wjdU@3Nf zlnYE!GTyNrqes|;nLFXfWU*Wg-9wmr=NBd$nCk+H?iwNvcd0Wab^3CT9a`>3V~oWI z9=_H+N-Q=MQ(io4u4mpdQ;k&5FXnKV5M7R`@WJ9h(GrAirO#XXOU{qQpk^B^Vd=Dt{wiqT zg-#j9J~@o%H2;W9mg)o6@*Vo;BSs2*4HAHpDk02mndAsov08R_48zJZ@J)s7+hyCo zy*0L#y)?AqZt-wX%+_Vx`8*A95OLHvs1$k~{h-_N_vov_gHJE=`X>L?5K+ zD?u59=mjtImMvd1GsDytuYp{IyUkW&?h zF>$#`n$~bZ)KN0B$XGeMYh&`;g8 zo_2-koaO6+8O!+L>SpIQbG(i;QW9UJi{Ecewlo?s&D!^>i$|#jaW}#HJuxt|W48=? zb^Y&O$a1s5ddr8DIt!sD!t=y1g(d4GR(s;s-HfV$GXl&m;+sAAxB^rk(3_NjE$p#L z*t4em?tA0d+XwRxN^OQwzbDZMuSE0J1)Ky{mq)^t4bnSl*)s>zNM@mMdtd78&ebHN z`!(|lE5q-p+TsRaNnMXwALaN5QIZ2IUi^Z22tsN5>nvIO+YU}Q*xh6}ee6@rR~<&1 z(PB4z>9ZBUMXZwSMmd9-aKKsmJeJq^G|#JclOh*xf0?^e0(`40nsg1z)(48;4}B_( zGwPI)yo|{oX{dVDL-5-aMGr;~vU1cPtJP5JM(sswz&Q`e<@0?y{YhsO9YK8EYJA;L z>7oG_Mts+(wCBC*Md82#XdKw&J*IizR?9k^rf1r{Ot-&>V^ke{9nI9zavlcNkIJtN z7T>?o|4rENk-?|lewZ(EfdR;%BUrzKJ^UkCpsM)EA9QHBVV8trT&*O(9?FO{MLTFL z=5P0H+T6C^jAuX0k4U;~GM!x`!X2N~3_n?qXY$HI>x@(DHEy&Q3ucT1R6fj28wX!I zC=&d$@bJ_v^%?W2Ngl}e8ww`b%BrN-PzGH;$@B2Ky1?%GMkm#~Okj(-Admyy;qya| zOi73kr_pwt?5Nj3p=&H>81!w#>Agj z(QXx{j0r=pTl>micAI_5vUw<3`Sht?Z}-j2Wx~F8DKCUQrsXl2?W8hur42(F_ zsSJ)_36&x6A|YkY6c<2a94SXbv~d>4CC4nkDPvf9Z5Fys^6^5r0j5=E>Cgy_Dk@tS z%?c}9!qB?t6t8(XMH%le8UeNWp@Nsma~Ql+^3Bo%_npMryeQJz4V=BAqE~T?dejng z3ge{fjCHoNAfYBvsfq;G%VL|j7t z`X0sy1EEgpyD;)tS1x+fnv-?C@glP0{RCW}Ma?3qpoq_&IJAYOy3G#s`rsh5=3>`K zkj``=;|*x5HSjZC zXNvPLh372q;=+6ja|SC!R-`JcL}}wwskajjTUGTpL(1zkN-p?BA2lmf+J3WsB7!k`0Brx8^cLTF9h)r+LZ$vsZo}`OpOs)?c6$hclR!R#MAeh|_DY|9r zy+_3c%IO9h9X?ksp?an&>Lw;QeQ`T-Ku6HaK~H?E9-Z5$cZu{YU;1+-6B$|JD;%!^ zt(4l>F8}a-UkC4YtOxFHckhl4VKr6P$P_O*U!)IDory%}Wz`YeFx6TO{y2Y${SBm?H9cTWV=WWJ z`_*CGso!ZN>l@~_jkeXtV}fczfA{TUkyeD>)i3|NFGcCsBmK3HXp&ol_@GVs7PIpfULy!hi zs+%KYgS%(n7_z_}6)hblk~W#LZ@&2)fwm6xkFP%&Ju|MFWbNiTwy{{g-pV1RK`L&=RE2D z4|g;~vd8xd|teYS%w!IlT4W$&FTrk-hcTADX!P?*f1YWEIRwq$Ys%^(Z9w&HT$>} zsMD#6Df=uJrX!JHP7<>Or;e_Cf=}`!`qR=i8fBj)$6Lxx{HRzd8Tnzd0p>kSps{OG zKJkml>bUj8$u|F=``l(-aMxWBC@CGZ#FXClQZ<4|&%jN}Tkg#q8z)=>Ly{$i0`rjU zvt|QddO&i=91e?h3>s~i;+6{ z8X4i6a1wDLrSuE#W(zhan+U*Zq+8p3a))JFVF4ffaV51K^YgTso~3;Y*NmM; zx8T?y-N0uyWY(8=me-HUC9xtABvX5~%yg+Cp&XF$Bq=OcK6T*D7eZ2EmIoCFWm{$S z1PNw8HDpe5hHeCusN8kdeb&f2#=3M^A~7YwJ7FRrhq*)PG9x?JIAaC{MV}5}g#7R$-Ly%)4=IUkRCGOR|XTMjn&okRmFjaO^YF5^* z@)#MCBOBezD)*xQNxydlUyN?dW{fS(s-T`gv*0BEnk}`BdmrbmPO8q8y(X$AA}*RH%I7Av!~84pudHb&%Q5-j 
zt?=6x(iR?<^_7X0v6Ys#VAL}dKk^hcjI=|EY;kPcZ_w<*H`_*|N7SacaM1ERD@6ab zg`!iTm7$URV+lpW_{V$ruR&A>jrX68k4x2wo$45}&wf7o<|o(@B!u-L@bKyQBAGwy z4#}UrRAu>^>Vb6k2-th^>WjvP;Nl|i3WrjWv3ISkj{m{eAcQIW^_ndxSX@|8T(ASJ z?_$fcP2u*6uOBk-{d>^ z0vWlfGQMvysI%R=iE|A+!!Nw?C917EU*_$`;;)px?s83CRd3i_jBN)k#nR5t$dJ(+ z_sP;wG@Ad)^(3LRj7q}0b2O(b`|i0~5SYb%Sjk^*5ISZ-Ab+}DGu$-X1n^TF1Ndw_ zF|e*1)cI2%`TR&AW~XpqpFb!=3cHbS>np9hYD_Mr5}y5Y`SY^r7isA2Q4(z zazRQEqWDKT2zIEbjSYdCPi1ZOGz80Nsl}gxO^DWMY0AV<2K&OL{&^6#@L1?lXu#6xSMh%3^5c*}oM6DQGY#(a^@z<&D zF(43I9e&5`h|A$5!+UFuOH0>F3$shBV4`0#M4RSB8=6F0ZgIbq<2LQ$Hh^(kAJu=! zt8ZGXTacD{(3W{V1$j_{Jc)Ka7t6u}ho`4kF+4@t_0!mCBn z)}o%eA}L)_L?=jw6BIfll7tb3n}?*yLt&XADa=rW>qz=_6s9ziOd5sXjil>FVFx3r zf>Feewk0v#W9>Gp4GacTRr>Sd2T6dWi-{YX`v!D)kCWzG5xQB=?es5ON(%nkwUhNl zV>@xkWWWv*N+{e$(SrExvN6BXzU(Hxlx27{VYHf+LpIbTO+Yu(ltMk<;)3A(LU@ytVYFkYvTa79idMtUFhfxx?P!)2F`prNWW#Fub#l>N2s@nh&n_ zA4{#}|AIs9|A4P0ZF%fy=hDN!t#ifH<)4u2kirK~JUpjQ-J+~cXOZI&dIts;P}UeXslP6zKvpEKSN-$y>kJ^nw2tC9bv zo(|lT@?vZ!{_l|d^8Yh)eEBh*5ABh+Lzjw+?V)o z#P-W7361>E(Y4;@`sv;VKn G`u_lkUM?>H literal 16448 zcmbXJW03CL7d?tTjor45-QI26wzb=~ZQHhO@3w8*w(ZmJ@BZ(tbF0p$la(=N#>kvm zE2(5vQkCfPhySAC*&%gOhXNAMqjXaM8ZdR9h1n(j|bAOHa3xsaUpVQb^?bFN$mKV0Ewcy3Du z@-8k$`ak32WBbVi`wx;7^0Pnwe^+&aJAe9T8!-8dp8P-m^j_k+W}s`RtGffD4+(~# ztFH^%r@=P?d_)fbz?K5R0s#N*H#RfO?CBZn>6_?x^z-v0gc4w+(WBE}13CaHLhywQ z!#%^j8s6#2z4_*~82qM%VW?EZaP{qr6q7)~zyRXUfu8*DIFkvyQi}2zgVP1nasq{A zzK$~<^8~1Leh9gA7?OYdWb(rhHBCeLF_~b@=XwJtb#c@X=&{tLR~#2+TS{-c`vBYE zGBWX|sg2q1)>^5WQl6tV-S^gSSDaqgl)f0g5bP3XzB_opq(U*a%n-{&Nsp#<PXeb*#gCojQ<~*y?%~jIH!wY%g9nHSRoaSF?Kj+nhFb0uC&n_VOmpd_OBYox zmnx5#Y6>`tg|imfwPr|~9o*VGw6l}bCod<5GtgOopG#Z3FYU1yX;{uJt(#*r8r_e7 zFtr;Gdot=wqBrPOr&Auqx9S#4&q}4+IV@$;lS%g;OwuPXe}-tkmpsZwyFbf2RoE|~ z^I*n!=-?L4caqmD0 ze6gB6sXkw{<`|Cx?yb^4okCyXCb!Pswu?l=&V6!>eVjh=XD+I%?*-Gd7M;9>8h)~6 z&0J!HkB*tz&l&C|b)oTW*SdHifwpF*1$>(yA`o_PKmUNb%3cQp@DV=5e(dQG!VdB# z4zOo2dD*d^}VrwZDE>cjbvV3uXQpX;>NPr?6LUB>JyOhwrqV5Mj1Q8A=HxZxa- zQwXEXE4&D0kFPJik^cKOC{0^_Gd~wNu89<_dGZ;!WUzzZ3ld}@(h^<$4X6-4pZP0> z4cT8q?NQVurwRI1@u5c=cK!0A)|eeN43pohgBKnf%Zphd-bWZGHIQE~`m`*h=F^&l ziYiYp2Bli;gaHnZjhfJboUR`tiB7foe6NfemF%KO8OT@`0*rjk^<*{<(SKi84B6$c zSAeZ)XeDt@7mIt)7s!bPz7`HP9ftqc{+RVQxN1rHewmj8Yp3IVyy5+hfQzfO*PnR6 zhtk{-Yu&KlSEH<_;xUIck%#8F?#Q96cq(tN&Y&yCP>~SwZF+9EW+Z}7E5H4?%I{Wg z(N$R$e70H+BskvgkMrx=s0NkTo4j@vUJI?-vt>?b>ZKxs;_5=f0G)6f@U^u0(`_>iKBH|X`>9ka9q#!rMTZ#DaG+DNj4Hb@5WUDRx;OQyC`$YMi^IjCMmr8 zI(s_$k$_>i*!Zw?b0n%}L?TE;8iYNv&D5Okc@@2k64bhgEg9atc=7JTCCwE4`m2d) zotf55o`s|4kAD`L4d20r!>w61;4e~qalSSgRUGOBHl z9RTUz=#A|RA)-_XJ;fPvhjE(w=K~z`rx{{e9EixI()Jy>7>q7pDk!X2)o;7@b}3Yu z9i|Jv^->~KNaK}*?iz`k`wWk?k2H%PP(=B6#}1W+=RSZgxN>tnUk$!WK4gXlQ5YlR zTsK(s$>9-qC_*h|B?@VYC<>v5_KI>C2z_VFA`o{64(?4{0alZ{Nw|H`!{CqynYP_3XpLG_k ziP$}NfO!Bc1h;p(xMku(+}e9AFC+)*b7-cf-zFY{y5q^zfrbBu7o09H&lgsnQ0~~g zy2GlijEBH%4KeBzhNc5k{iK+Y1-<2Q>UF|@>0Y(&Q0+KPt-?=>*O;tSLw&e#b>>(F zM@%`Dp)}XMSMJ?EoMgkl7E2Dlkm_n=3YT5*wm_QDoZ>7lvtsY4O)?QU&&U>WL1boz zQpm^5oPSA<)4GyW3E#Ps%#pgS9&NNgd{L&{3U4mAPIsPKsgeU0qP%W$`ZjtthBo>w z{j$ZZ`}y)?bf|%(x(~j-JG@sY%R;$v#5BH_v+zHz7j`4+RX_0>ExySHVGK_8?ls$< zCG8GiJ4!l$_CUvA=~B4lvLPO5zU!YI$VaRmBu-~t`|-fjE8m|b--_hjHI@%Obfn<5 zqFvMMzZAUzVr-;8sF5B#27-ldl$|mdx)l)mQQFu2FIOtOc7Gu;oB3aT zkoEXW@GtHDhHTLayMa&3)3q|?*fC_}cttu?Q9^2h4(mFdWi>)r&@Pv28u{R72XTH0 zZRuM=#0U~(p`Qab%BV&JME9I}R{we>pw1JgB;y5-iwrmRLHP%hMOR#-7%AknieOMN zo?28Tc1wE+o31Am+Nv4Dye*YinTqC2UW;J%&TbQ$KFih z&(4l%v^}kxB%IPw1bwe_&i`(w`EDZ;rR4y4yR?*>qOb6Ki?AP+?18T2(HMlK=(_{9 zdm{~sd*AEH(5!TkVTELf1xG!^WBK_T~kY*#Ba=bK-yDs2kr{xCsRh;tzmzhb6>9 z!z+!FI)u7k9fl1aR<{6Rb(#qU59Ak=h_2T0ar}&kf$rP4^hRW*)_l%I!1KROf`P)) 
literal 0
HcmV?d00001

diff --git a/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js b/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js
deleted file mode 100755
index 2c64257146c4..000000000000
--- a/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js
+++ /dev/null
@@ -1,1999 +0,0 @@
-/**
-* bootstrap.js v3.0.0 by @fat and @mdo
-* Copyright 2013 Twitter Inc.
-* http://www.apache.org/licenses/LICENSE-2.0
-*/
-if (!jQuery) { throw new Error("Bootstrap requires jQuery") }
-
-/* ========================================================================
- * Bootstrap: transition.js v3.0.0
- * http://twbs.github.com/bootstrap/javascript.html#transitions
- * ========================================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ======================================================================== */
-
-
-+function ($) { "use strict";
-
-  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
-  // ============================================================
-
-  function transitionEnd() {
-    var el = document.createElement('bootstrap')
-
-    var transEndEventNames = {
-      'WebkitTransition' : 'webkitTransitionEnd'
-    , 'MozTransition'    : 'transitionend'
-    , 'OTransition'      : 'oTransitionEnd otransitionend'
-    , 'transition'       : 'transitionend'
-    }
-
-    for (var name in transEndEventNames) {
-      if (el.style[name] !== undefined) {
-        return { end: transEndEventNames[name] }
-      }
-    }
-  }
-
-  // http://blog.alexmaccaw.com/css-transitions
-  $.fn.emulateTransitionEnd = function (duration) {
-    var called = false, $el = this
-    $(this).one($.support.transition.end, function () { called = true })
-    var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
-    setTimeout(callback, duration)
-    return this
-  }
-
-  $(function () {
-    $.support.transition = transitionEnd()
-  })
-
-}(window.jQuery);
-
-/* ========================================================================
- * Bootstrap: alert.js v3.0.0
- * http://twbs.github.com/bootstrap/javascript.html#alerts
- * ========================================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // ALERT CLASS DEFINITION - // ====================== - - var dismiss = '[data-dismiss="alert"]' - var Alert = function (el) { - $(el).on('click', dismiss, this.close) - } - - Alert.prototype.close = function (e) { - var $this = $(this) - var selector = $this.attr('data-target') - - if (!selector) { - selector = $this.attr('href') - selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7 - } - - var $parent = $(selector) - - if (e) e.preventDefault() - - if (!$parent.length) { - $parent = $this.hasClass('alert') ? $this : $this.parent() - } - - $parent.trigger(e = $.Event('close.bs.alert')) - - if (e.isDefaultPrevented()) return - - $parent.removeClass('in') - - function removeElement() { - $parent.trigger('closed.bs.alert').remove() - } - - $.support.transition && $parent.hasClass('fade') ? - $parent - .one($.support.transition.end, removeElement) - .emulateTransitionEnd(150) : - removeElement() - } - - - // ALERT PLUGIN DEFINITION - // ======================= - - var old = $.fn.alert - - $.fn.alert = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.alert') - - if (!data) $this.data('bs.alert', (data = new Alert(this))) - if (typeof option == 'string') data[option].call($this) - }) - } - - $.fn.alert.Constructor = Alert - - - // ALERT NO CONFLICT - // ================= - - $.fn.alert.noConflict = function () { - $.fn.alert = old - return this - } - - - // ALERT DATA-API - // ============== - - $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: button.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#buttons - * ======================================================================== - * Copyright 2013 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // BUTTON PUBLIC CLASS DEFINITION - // ============================== - - var Button = function (element, options) { - this.$element = $(element) - this.options = $.extend({}, Button.DEFAULTS, options) - } - - Button.DEFAULTS = { - loadingText: 'loading...' - } - - Button.prototype.setState = function (state) { - var d = 'disabled' - var $el = this.$element - var val = $el.is('input') ? 
'val' : 'html' - var data = $el.data() - - state = state + 'Text' - - if (!data.resetText) $el.data('resetText', $el[val]()) - - $el[val](data[state] || this.options[state]) - - // push to event loop to allow forms to submit - setTimeout(function () { - state == 'loadingText' ? - $el.addClass(d).attr(d, d) : - $el.removeClass(d).removeAttr(d); - }, 0) - } - - Button.prototype.toggle = function () { - var $parent = this.$element.closest('[data-toggle="buttons"]') - - if ($parent.length) { - var $input = this.$element.find('input') - .prop('checked', !this.$element.hasClass('active')) - .trigger('change') - if ($input.prop('type') === 'radio') $parent.find('.active').removeClass('active') - } - - this.$element.toggleClass('active') - } - - - // BUTTON PLUGIN DEFINITION - // ======================== - - var old = $.fn.button - - $.fn.button = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.button') - var options = typeof option == 'object' && option - - if (!data) $this.data('bs.button', (data = new Button(this, options))) - - if (option == 'toggle') data.toggle() - else if (option) data.setState(option) - }) - } - - $.fn.button.Constructor = Button - - - // BUTTON NO CONFLICT - // ================== - - $.fn.button.noConflict = function () { - $.fn.button = old - return this - } - - - // BUTTON DATA-API - // =============== - - $(document).on('click.bs.button.data-api', '[data-toggle^=button]', function (e) { - var $btn = $(e.target) - if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn') - $btn.button('toggle') - e.preventDefault() - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: carousel.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#carousel - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * ======================================================================== */ - - -+function ($) { "use strict"; - - // CAROUSEL CLASS DEFINITION - // ========================= - - var Carousel = function (element, options) { - this.$element = $(element) - this.$indicators = this.$element.find('.carousel-indicators') - this.options = options - this.paused = - this.sliding = - this.interval = - this.$active = - this.$items = null - - this.options.pause == 'hover' && this.$element - .on('mouseenter', $.proxy(this.pause, this)) - .on('mouseleave', $.proxy(this.cycle, this)) - } - - Carousel.DEFAULTS = { - interval: 5000 - , pause: 'hover' - , wrap: true - } - - Carousel.prototype.cycle = function (e) { - e || (this.paused = false) - - this.interval && clearInterval(this.interval) - - this.options.interval - && !this.paused - && (this.interval = setInterval($.proxy(this.next, this), this.options.interval)) - - return this - } - - Carousel.prototype.getActiveIndex = function () { - this.$active = this.$element.find('.item.active') - this.$items = this.$active.parent().children() - - return this.$items.index(this.$active) - } - - Carousel.prototype.to = function (pos) { - var that = this - var activeIndex = this.getActiveIndex() - - if (pos > (this.$items.length - 1) || pos < 0) return - - if (this.sliding) return this.$element.one('slid', function () { that.to(pos) }) - if (activeIndex == pos) return this.pause().cycle() - - return this.slide(pos > activeIndex ? 'next' : 'prev', $(this.$items[pos])) - } - - Carousel.prototype.pause = function (e) { - e || (this.paused = true) - - if (this.$element.find('.next, .prev').length && $.support.transition.end) { - this.$element.trigger($.support.transition.end) - this.cycle(true) - } - - this.interval = clearInterval(this.interval) - - return this - } - - Carousel.prototype.next = function () { - if (this.sliding) return - return this.slide('next') - } - - Carousel.prototype.prev = function () { - if (this.sliding) return - return this.slide('prev') - } - - Carousel.prototype.slide = function (type, next) { - var $active = this.$element.find('.item.active') - var $next = next || $active[type]() - var isCycling = this.interval - var direction = type == 'next' ? 'left' : 'right' - var fallback = type == 'next' ? 
'first' : 'last' - var that = this - - if (!$next.length) { - if (!this.options.wrap) return - $next = this.$element.find('.item')[fallback]() - } - - this.sliding = true - - isCycling && this.pause() - - var e = $.Event('slide.bs.carousel', { relatedTarget: $next[0], direction: direction }) - - if ($next.hasClass('active')) return - - if (this.$indicators.length) { - this.$indicators.find('.active').removeClass('active') - this.$element.one('slid', function () { - var $nextIndicator = $(that.$indicators.children()[that.getActiveIndex()]) - $nextIndicator && $nextIndicator.addClass('active') - }) - } - - if ($.support.transition && this.$element.hasClass('slide')) { - this.$element.trigger(e) - if (e.isDefaultPrevented()) return - $next.addClass(type) - $next[0].offsetWidth // force reflow - $active.addClass(direction) - $next.addClass(direction) - $active - .one($.support.transition.end, function () { - $next.removeClass([type, direction].join(' ')).addClass('active') - $active.removeClass(['active', direction].join(' ')) - that.sliding = false - setTimeout(function () { that.$element.trigger('slid') }, 0) - }) - .emulateTransitionEnd(600) - } else { - this.$element.trigger(e) - if (e.isDefaultPrevented()) return - $active.removeClass('active') - $next.addClass('active') - this.sliding = false - this.$element.trigger('slid') - } - - isCycling && this.cycle() - - return this - } - - - // CAROUSEL PLUGIN DEFINITION - // ========================== - - var old = $.fn.carousel - - $.fn.carousel = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.carousel') - var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option) - var action = typeof option == 'string' ? option : options.slide - - if (!data) $this.data('bs.carousel', (data = new Carousel(this, options))) - if (typeof option == 'number') data.to(option) - else if (action) data[action]() - else if (options.interval) data.pause().cycle() - }) - } - - $.fn.carousel.Constructor = Carousel - - - // CAROUSEL NO CONFLICT - // ==================== - - $.fn.carousel.noConflict = function () { - $.fn.carousel = old - return this - } - - - // CAROUSEL DATA-API - // ================= - - $(document).on('click.bs.carousel.data-api', '[data-slide], [data-slide-to]', function (e) { - var $this = $(this), href - var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7 - var options = $.extend({}, $target.data(), $this.data()) - var slideIndex = $this.attr('data-slide-to') - if (slideIndex) options.interval = false - - $target.carousel(options) - - if (slideIndex = $this.attr('data-slide-to')) { - $target.data('bs.carousel').to(slideIndex) - } - - e.preventDefault() - }) - - $(window).on('load', function () { - $('[data-ride="carousel"]').each(function () { - var $carousel = $(this) - $carousel.carousel($carousel.data()) - }) - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: collapse.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#collapse - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // COLLAPSE PUBLIC CLASS DEFINITION - // ================================ - - var Collapse = function (element, options) { - this.$element = $(element) - this.options = $.extend({}, Collapse.DEFAULTS, options) - this.transitioning = null - - if (this.options.parent) this.$parent = $(this.options.parent) - if (this.options.toggle) this.toggle() - } - - Collapse.DEFAULTS = { - toggle: true - } - - Collapse.prototype.dimension = function () { - var hasWidth = this.$element.hasClass('width') - return hasWidth ? 'width' : 'height' - } - - Collapse.prototype.show = function () { - if (this.transitioning || this.$element.hasClass('in')) return - - var startEvent = $.Event('show.bs.collapse') - this.$element.trigger(startEvent) - if (startEvent.isDefaultPrevented()) return - - var actives = this.$parent && this.$parent.find('> .panel > .in') - - if (actives && actives.length) { - var hasData = actives.data('bs.collapse') - if (hasData && hasData.transitioning) return - actives.collapse('hide') - hasData || actives.data('bs.collapse', null) - } - - var dimension = this.dimension() - - this.$element - .removeClass('collapse') - .addClass('collapsing') - [dimension](0) - - this.transitioning = 1 - - var complete = function () { - this.$element - .removeClass('collapsing') - .addClass('in') - [dimension]('auto') - this.transitioning = 0 - this.$element.trigger('shown.bs.collapse') - } - - if (!$.support.transition) return complete.call(this) - - var scrollSize = $.camelCase(['scroll', dimension].join('-')) - - this.$element - .one($.support.transition.end, $.proxy(complete, this)) - .emulateTransitionEnd(350) - [dimension](this.$element[0][scrollSize]) - } - - Collapse.prototype.hide = function () { - if (this.transitioning || !this.$element.hasClass('in')) return - - var startEvent = $.Event('hide.bs.collapse') - this.$element.trigger(startEvent) - if (startEvent.isDefaultPrevented()) return - - var dimension = this.dimension() - - this.$element - [dimension](this.$element[dimension]()) - [0].offsetHeight - - this.$element - .addClass('collapsing') - .removeClass('collapse') - .removeClass('in') - - this.transitioning = 1 - - var complete = function () { - this.transitioning = 0 - this.$element - .trigger('hidden.bs.collapse') - .removeClass('collapsing') - .addClass('collapse') - } - - if (!$.support.transition) return complete.call(this) - - this.$element - [dimension](0) - .one($.support.transition.end, $.proxy(complete, this)) - .emulateTransitionEnd(350) - } - - Collapse.prototype.toggle = function () { - this[this.$element.hasClass('in') ? 
'hide' : 'show']() - } - - - // COLLAPSE PLUGIN DEFINITION - // ========================== - - var old = $.fn.collapse - - $.fn.collapse = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.collapse') - var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option) - - if (!data) $this.data('bs.collapse', (data = new Collapse(this, options))) - if (typeof option == 'string') data[option]() - }) - } - - $.fn.collapse.Constructor = Collapse - - - // COLLAPSE NO CONFLICT - // ==================== - - $.fn.collapse.noConflict = function () { - $.fn.collapse = old - return this - } - - - // COLLAPSE DATA-API - // ================= - - $(document).on('click.bs.collapse.data-api', '[data-toggle=collapse]', function (e) { - var $this = $(this), href - var target = $this.attr('data-target') - || e.preventDefault() - || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7 - var $target = $(target) - var data = $target.data('bs.collapse') - var option = data ? 'toggle' : $this.data() - var parent = $this.attr('data-parent') - var $parent = parent && $(parent) - - if (!data || !data.transitioning) { - if ($parent) $parent.find('[data-toggle=collapse][data-parent="' + parent + '"]').not($this).addClass('collapsed') - $this[$target.hasClass('in') ? 'addClass' : 'removeClass']('collapsed') - } - - $target.collapse(option) - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: dropdown.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#dropdowns - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // DROPDOWN CLASS DEFINITION - // ========================= - - var backdrop = '.dropdown-backdrop' - var toggle = '[data-toggle=dropdown]' - var Dropdown = function (element) { - var $el = $(element).on('click.bs.dropdown', this.toggle) - } - - Dropdown.prototype.toggle = function (e) { - var $this = $(this) - - if ($this.is('.disabled, :disabled')) return - - var $parent = getParent($this) - var isActive = $parent.hasClass('open') - - clearMenus() - - if (!isActive) { - if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) { - // if mobile we we use a backdrop because click events don't delegate - $('
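The hunk below wires the master status page's regions-in-transition (RIT) table to a user-selected page size instead of the previously hard-coded 5 rows. A minimal sketch of the client-side behavior it sets up, assuming the page exposes a select element with id "rit_per_page"; the explicit radix and the NaN fallback are illustrative assumptions layered on top of the hunk, not part of it:

  // Read the per-page choice from the page; fall back to the old fixed size.
  var ritPerPage = parseInt($("#rit_per_page").val(), 10);
  if (isNaN(ritPerPage) || ritPerPage < 1) {
    ritPerPage = 5;
  }
  // Re-initialize the pagination widget with the chosen page size.
  $("#rit_pagination").sPage({
    page: 1,
    total: ritTotalNum,  // total number of regions in transition
    pageSize: ritPerPage
  });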
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index d95a35904dc7..602122db4a31 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -499,11 +499,12 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); if (!ritTotalNum || ritTotalNum < 1) { return; } + var ritPerPage = parseInt($("#rit_per_page").val()); $("#rit_pagination").sPage({ page:1, total:ritTotalNum, - pageSize:5, + pageSize:ritPerPage, noData: false, showPN:true, prevPage:"prev", From 4e59014bed66bb3204a1d60d42c69b0eb86c0219 Mon Sep 17 00:00:00 2001 From: bsglz <18031031@qq.com> Date: Fri, 25 Sep 2020 19:24:08 +0800 Subject: [PATCH 075/769] Add Zheng Wang to developers list. (#2457) --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index 06631c30d6e2..088a7e4898e4 100755 --- a/pom.xml +++ b/pom.xml @@ -674,6 +674,12 @@ meiyi@apache.org +8 + + wangzheng + Zheng (bsglz) Wang + wangzheng@apache.org + +8 + From c86b0655a3b312af912c2d384bc4d19f6d110b3e Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Tue, 22 Sep 2020 14:48:45 -0700 Subject: [PATCH 076/769] HBASE-25085 Add support for java properties to hbase-vote.sh Signed-off-by: Viraj Jasani Signed-off-by: Jan Hentschel Signed-off-by: Tak-Lon (Stephen) Wu --- dev-support/hbase-vote.sh | 39 ++++++++++++---------- src/main/asciidoc/_chapters/developer.adoc | 4 ++- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/dev-support/hbase-vote.sh b/dev-support/hbase-vote.sh index 88e22849a92f..d608f1e5e4a4 100755 --- a/dev-support/hbase-vote.sh +++ b/dev-support/hbase-vote.sh @@ -29,7 +29,7 @@ hbase-vote. A script for standard vote which verifies the following items 4. Built from source 5. Unit tests -Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] [-P runSmallTests] +Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] [-P runSmallTests] [-D property[=value]] ${SCRIPT} -h | --help -h | --help Show this screen. @@ -40,24 +40,27 @@ Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file- https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target -P | list of maven profiles to activate for test UT/IT, i.e. <-P runSmallTests> Defaults to runAllTests + -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0> Defaults to unset __EOF } while ((${#})); do case "${1}" in -h | --help ) - usage; exit 0 ;; - -s | --source ) - SOURCE_URL="${2}"; shift 2 ;; - -k | --key ) - SIGNING_KEY="${2}"; shift 2 ;; + usage; exit 0 ;; + -s | --source ) + SOURCE_URL="${2}"; shift 2 ;; + -k | --key ) + SIGNING_KEY="${2}"; shift 2 ;; -f | --keys-file-url ) - KEY_FILE_URL="${2}"; shift 2 ;; + KEY_FILE_URL="${2}"; shift 2 ;; -o | --output-dir ) - OUTPUT_DIR="${2}"; shift 2 ;; + OUTPUT_DIR="${2}"; shift 2 ;; -P ) - MVN_ARGS="-P ${2}"; shift 2 ;; - * ) + MVN_PROFILES="-P ${2}"; shift 2 ;; + -D ) + MVN_PROPERTIES="-D ${2}"; shift 2 ;; + * ) usage >&2; exit 1 ;; esac done @@ -89,8 +92,8 @@ if [ ! 
-d "${OUTPUT_DIR}" ]; then fi # Maven profile must be provided -if [ -z "${MVN_ARGS}" ]; then - MVN_ARGS="-P runAllTests" +if [ -z "${MVN_PROFILES}" ]; then + MVN_PROFILES="-P runAllTests" fi OUTPUT_PATH_PREFIX="${OUTPUT_DIR}"/"${HBASE_RC_VERSION}" @@ -142,17 +145,17 @@ function unzip_from_source() { function rat_test() { rm -f "${OUTPUT_PATH_PREFIX}"_rat_test - mvn clean apache-rat:check 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 + mvn clean apache-rat:check "${MVN_PROPERTIES}" 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 } function build_from_source() { rm -f "${OUTPUT_PATH_PREFIX}"_build_from_source - mvn clean install -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 + mvn clean install "${MVN_PROPERTIES}" -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 } function run_tests() { rm -f "${OUTPUT_PATH_PREFIX}"_run_tests - mvn package "${MVN_ARGS}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 + mvn package "${MVN_PROFILES}" "${MVN_PROPERTIES}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 } function execute() { @@ -164,11 +167,11 @@ function print_when_exit() { * Signature: $( ((SIGNATURE_PASSED)) && echo "ok" || echo "failed" ) * Checksum : $( ((CHECKSUM_PASSED)) && echo "ok" || echo "failed" ) * Rat check (${JAVA_VERSION}): $( ((RAT_CHECK_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean apache-rat:check + - mvn clean apache-rat:check "${MVN_PROPERTIES}" * Built from source (${JAVA_VERSION}): $( ((BUILD_FROM_SOURCE_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean install -DskipTests + - mvn clean install -DskipTests "${MVN_PROPERTIES}" * Unit tests pass (${JAVA_VERSION}): $( ((UNIT_TEST_PASSED)) && echo "ok" || echo "failed" ) - - mvn package ${MVN_ARGS} + - mvn package ${MVN_PROFILES} "${MVN_PROPERTIES}" -Dsurefire.rerunFailingTestsCount=3 __EOF if ((CHECKSUM_PASSED)) && ((SIGNATURE_PASSED)) && ((RAT_CHECK_PASSED)) && ((BUILD_FROM_SOURCE_PASSED)) && ((UNIT_TEST_PASSED)) ; then exit 0 diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index 27c369255459..3e2e3938f459 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -1152,7 +1152,7 @@ hbase-vote. A script for standard vote which verifies the following items 4. Built from source 5. Unit tests -Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] +Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] [-P runSmallTests] [-D property[=value]] hbase-vote.sh -h | --help -h | --help Show this screen. @@ -1162,6 +1162,8 @@ Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-f -f | --keys-file-url '' the URL of the key file, default is https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target + -P | list of maven profiles to activate for test UT/IT, i.e. <-P runSmallTests> Defaults to runAllTests + -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0> Defaults to unset ---- * If you see any unit test failures, please call out the solo test result and whether it's part of flaky (nightly) tests dashboard, e.g. 
link:https://builds.apache.org/view/H-L/view/HBase/job/HBase-Find-Flaky-Tests/job/master/lastSuccessfulBuild/artifact/dashboard.html[dashboard of master branch] (please change the test branch accordingly). From 1093e34967f0e59c212c0d96ef611f0fff6c6f10 Mon Sep 17 00:00:00 2001 From: Joseph295 <517536891@qq.com> Date: Sat, 26 Sep 2020 16:55:54 +0800 Subject: [PATCH 077/769] HBASE-25088 CatalogFamilyFormat/MetaTableAccessor.parseRegionInfoFromRegionName incorrectly setEndKey to regionId (#2448) Signed-off-by: Jan Hentschel Signed-off-by: Duo Zhang --- .../hadoop/hbase/CatalogFamilyFormat.java | 2 +- .../hadoop/hbase/TestCatalogFamilyFormat.java | 32 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java index a2297b66ab32..d0ee3dc83326 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java @@ -101,7 +101,7 @@ public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws long regionId = Long.parseLong(Bytes.toString(fields[2])); int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0; return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1]) - .setEndKey(fields[2]).setSplit(false).setRegionId(regionId).setReplicaId(replicaId).build(); + .setRegionId(regionId).setReplicaId(replicaId).build(); } /** diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java index 78e0fdba3016..628655a083c2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java @@ -19,13 +19,19 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.io.IOException; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.ClassRule; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; @Category({ ClientTests.class, SmallTests.class }) public class TestCatalogFamilyFormat { @@ -34,6 +40,9 @@ public class TestCatalogFamilyFormat { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCatalogFamilyFormat.class); + @Rule + public TestName name = new TestName(); + @Test public void testParseReplicaIdFromServerColumn() { String column1 = HConstants.SERVER_QUALIFIER_STR; @@ -70,4 +79,27 @@ public void testMetaReaderGetColumnMethods() { HConstants.SEQNUM_QUALIFIER_STR + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"), CatalogFamilyFormat.getSeqNumColumn(42)); } + + /** + * The info we can get from the regionName is: table name, start key, regionId, replicaId. 
+ */ + @Test + public void testParseRegionInfoFromRegionName() throws IOException { + RegionInfo originalRegionInfo = RegionInfoBuilder.newBuilder( + TableName.valueOf(name.getMethodName())).setRegionId(999999L) + .setStartKey(Bytes.toBytes("2")).setEndKey(Bytes.toBytes("3")) + .setReplicaId(1).build(); + RegionInfo newParsedRegionInfo = CatalogFamilyFormat + .parseRegionInfoFromRegionName(originalRegionInfo.getRegionName()); + assertEquals("Parse TableName error", originalRegionInfo.getTable(), + newParsedRegionInfo.getTable()); + assertEquals("Parse regionId error", originalRegionInfo.getRegionId(), + newParsedRegionInfo.getRegionId()); + assertTrue("Parse startKey error", Bytes.equals(originalRegionInfo.getStartKey(), + newParsedRegionInfo.getStartKey())); + assertEquals("Parse replicaId error", originalRegionInfo.getReplicaId(), + newParsedRegionInfo.getReplicaId()); + assertTrue("We can't parse endKey from regionName only", + Bytes.equals(HConstants.EMPTY_END_ROW, newParsedRegionInfo.getEndKey())); + } } From 380585ef20d8139e9aadffa4adb8189fa3824554 Mon Sep 17 00:00:00 2001 From: XinSun Date: Sat, 26 Sep 2020 19:49:02 +0800 Subject: [PATCH 078/769] HBASE-25098 ReplicationStatisticsChore runs in wrong time unit (#2460) Signed-off-by: Viraj Jasani Signed-off-by: Guanghao Zhang --- .../hbase/replication/ReplicationSinkServiceImpl.java | 8 +++++--- .../hbase/replication/regionserver/Replication.java | 9 ++++++--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java index 9b0e3f79fe07..91dd8d08e675 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -51,7 +52,7 @@ public class ReplicationSinkServiceImpl implements ReplicationSinkService { // ReplicationLoad to access replication metrics private ReplicationLoad replicationLoad; - private int statsPeriod; + private int statsPeriodInSecond; @Override public void replicateLogEntries(List entries, CellScanner cells, @@ -66,7 +67,7 @@ public void initialize(Server server, FileSystem fs, Path logdir, Path oldLogDir WALProvider walProvider) throws IOException { this.server = server; this.conf = server.getConfiguration(); - this.statsPeriod = + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); } @@ -75,7 +76,8 @@ public void initialize(Server server, FileSystem fs, Path logdir, Path oldLogDir public void startReplicationService() throws IOException { this.replicationSink = new ReplicationSink(this.conf); this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSinkStatistics", server, statsPeriod)); + new ReplicationStatisticsChore("ReplicationSinkStatistics", server, + (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 
33975edb5909..9be7b9a1e4c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -22,6 +22,8 @@ import java.util.List; import java.util.OptionalLong; import java.util.UUID; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -64,7 +66,7 @@ public class Replication implements ReplicationSourceService { private SyncReplicationPeerInfoProvider syncReplicationPeerInfoProvider; // Hosting server private Server server; - private int statsPeriod; + private int statsPeriodInSecond; // ReplicationLoad to access replication metrics private ReplicationLoad replicationLoad; private MetricsReplicationGlobalSourceSource globalMetricsSource; @@ -139,7 +141,7 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0)); } } - this.statsPeriod = + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); @@ -169,7 +171,8 @@ public void stopReplicationService() { public void startReplicationService() throws IOException { this.replicationManager.init(); this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSourceStatistics", server, statsPeriod)); + new ReplicationStatisticsChore("ReplicationSourceStatistics", server, + (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); LOG.info("{} started", this.server.toString()); } From c312760819ed185cab3a0717a1ea0ff6e8c47a23 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sat, 26 Sep 2020 20:16:48 +0530 Subject: [PATCH 079/769] HBASE-25045 : Add 2.3.2 to the downloads page Closes #2461 Signed-off-by: Guanghao Zhang --- src/site/xdoc/downloads.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index c49f09a9177e..37ea25af5e75 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -45,24 +45,24 @@ under the License. - 2.3.1 + 2.3.2 - 2020/08/19 + 2020/09/25 - 2.3.0 vs 2.3.1 + 2.3.1 vs 2.3.2 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
- bin (sha512 asc)
- client-bin (sha512 asc) + src (sha512 asc)
+ bin (sha512 asc)
+ client-bin (sha512 asc) From b57bef5b7dd7f9907985563f5d1071f5ac0f6eab Mon Sep 17 00:00:00 2001 From: Mohammad Arshad Date: Sun, 27 Sep 2020 11:35:38 +0530 Subject: [PATCH 080/769] HBASE-25077: hbck.jsp page loading fails, logs NPE in master log. (#2433) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani Signed-off-by: Pankaj Kumar --- .../main/java/org/apache/hadoop/hbase/master/HbckChore.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java index 8b886c8a4598..0973d037c8a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java @@ -255,10 +255,10 @@ private void loadRegionsFromRSReport() { for (Map.Entry entry : regionInfoMap.entrySet()) { HbckRegionInfo hri = entry.getValue(); ServerName locationInMeta = hri.getMetaEntry().getRegionServer(); + if (locationInMeta == null) { + continue; + } if (hri.getDeployedOn().size() == 0) { - if (locationInMeta == null) { - continue; - } // skip the offline region which belong to disabled table. if (disabledTableRegions.contains(hri.getRegionNameAsString())) { continue; From 86557edf2c0f286391898921b64624c412dcfc23 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Mon, 28 Sep 2020 14:47:18 +0900 Subject: [PATCH 081/769] HBASE-25096 WAL size in RegionServer UI is wrong (#2456) Signed-off-by: Guanghao Zhang --- .../hbase/regionserver/MetricsRegionServerWrapperImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index c4328c410da4..8ce2baaef4d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -856,7 +856,7 @@ synchronized public void run() { numWALFiles = (provider == null ? 0 : provider.getNumLogFiles()) + (metaProvider == null ? 0 : metaProvider.getNumLogFiles()); walFileSize = (provider == null ? 0 : provider.getLogFileSize()) + - (provider == null ? 0 : provider.getLogFileSize()); + (metaProvider == null ? 0 : metaProvider.getLogFileSize()); // Copy over computed values so that no thread sees half computed values. 
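      // A sketch of the corrected WAL-size computation above, for illustration
      // only: before this fix the default provider's log file size was summed
      // twice and the meta WAL provider's size was never counted. Assuming both
      // fields are WALProvider instances, the intent is equivalent to the
      // following (the helper below is hypothetical, not part of the patch):
      //
      //   long walFileSize = logFileSize(provider) + logFileSize(metaProvider);
      //
      //   static long logFileSize(WALProvider p) {
      //     return p == null ? 0 : p.getLogFileSize();
      //   }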
numStores = tempNumStores; numStoreFiles = tempNumStoreFiles; From 80ffac29c102758d2aaafe6c83c1b8621dca5ae6 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 28 Sep 2020 12:52:00 +0530 Subject: [PATCH 082/769] HBASE-25070 : With new generic API getLogEntries, cleaning up unused RPC APIs Closes #2426 Signed-off-by: Guanghao Zhang --- .../org/apache/hadoop/hbase/client/Admin.java | 4 +-- .../hadoop/hbase/client/AsyncAdmin.java | 4 +-- .../main/protobuf/server/region/Admin.proto | 6 ----- .../hbase/regionserver/RSRpcServices.java | 27 +------------------ .../hadoop/hbase/master/MockRegionServer.java | 14 ---------- 5 files changed, 5 insertions(+), 50 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 75d55cf17839..370ab6408254 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2512,8 +2512,8 @@ Pair, List> getConfiguredNamespacesAndTablesInRSGroup(St * Examples include slow/large RPC logs, balancer decisions by master. * * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. + * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. * @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index c2d7e8a07829..2ed624ca01f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1714,8 +1714,8 @@ default CompletableFuture> getSlowLogResponses( * Examples include slow/large RPC logs, balancer decisions by master. * * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. + * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. 
* @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index ca0c93209ec9..0667292917ae 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -393,12 +393,6 @@ service AdminService { rpc ExecuteProcedures(ExecuteProceduresRequest) returns(ExecuteProceduresResponse); - rpc GetSlowLogResponses(SlowLogResponseRequest) - returns(SlowLogResponses); - - rpc GetLargeLogResponses(SlowLogResponseRequest) - returns(SlowLogResponses); - rpc ClearSlowLogsResponses(ClearSlowLogResponseRequest) returns(ClearSlowLogResponses); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index be64966570f1..a59f5e609b17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3851,19 +3851,6 @@ public ExecuteProceduresResponse executeProcedures(RpcController controller, } } - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public SlowLogResponses getSlowLogResponses(final RpcController controller, - final SlowLogResponseRequest request) { - final NamedQueueRecorder namedQueueRecorder = - this.regionServer.getNamedQueueRecorder(); - final List slowLogPayloads = getSlowLogPayloads(request, namedQueueRecorder); - SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder() - .addAllSlowLogPayloads(slowLogPayloads) - .build(); - return slowLogResponses; - } - private List getSlowLogPayloads(SlowLogResponseRequest request, NamedQueueRecorder namedQueueRecorder) { if (namedQueueRecorder == null) { @@ -3881,19 +3868,6 @@ private List getSlowLogPayloads(SlowLogResponseRequest request, return slowLogPayloads; } - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public SlowLogResponses getLargeLogResponses(final RpcController controller, - final SlowLogResponseRequest request) { - final NamedQueueRecorder namedQueueRecorder = - this.regionServer.getNamedQueueRecorder(); - final List slowLogPayloads = getSlowLogPayloads(request, namedQueueRecorder); - SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder() - .addAllSlowLogPayloads(slowLogPayloads) - .build(); - return slowLogResponses; - } - @Override @QosPriority(priority = HConstants.ADMIN_QOS) public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controller, @@ -3911,6 +3885,7 @@ public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controll } @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public HBaseProtos.LogEntry getLogEntries(RpcController controller, HBaseProtos.LogRequest request) throws ServiceException { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 9a7135ce1c02..69a7a79644e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -114,8 +114,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponseRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponses; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; @@ -679,18 +677,6 @@ public ExecuteProceduresResponse executeProcedures(RpcController controller, return null; } - @Override - public SlowLogResponses getSlowLogResponses(RpcController controller, - SlowLogResponseRequest request) throws ServiceException { - return null; - } - - @Override - public SlowLogResponses getLargeLogResponses(RpcController controller, - SlowLogResponseRequest request) throws ServiceException { - return null; - } - @Override public ClearSlowLogResponses clearSlowLogsResponses(RpcController controller, ClearSlowLogResponseRequest request) throws ServiceException { From fbef545989f0824fc948d723a885a4ce13a26b7b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 28 Sep 2020 20:01:43 +0800 Subject: [PATCH 083/769] HBASE-25017 Migrate flaky reporting jenkins job from Hadoop to hbase (#2466) Signed-off-by: Guanghao Zhang --- dev-support/flaky-tests/flaky-reporting.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 8a56c0bdb0cc..640b1cb54a77 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { From 68b56beab744e983df0877eec9f576ef884a2807 Mon Sep 17 00:00:00 2001 From: XinSun Date: Tue, 29 Sep 2020 08:27:37 +0800 Subject: [PATCH 084/769] HBASE-25100 conf and conn are assigned twice in HBaseReplicationEndpoint and HBaseInterClusterReplicationEndpoint (#2463) Signed-off-by: Duo Zhang Signed-off-by: Guanghao Zhang --- .../replication/HBaseReplicationEndpoint.java | 61 +++++++++++++------ .../HBaseInterClusterReplicationEndpoint.java | 33 ---------- 2 files changed, 42 insertions(+), 52 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 850a79125562..b08c99098c5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -60,10 +60,11 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private static final Logger LOG = LoggerFactory.getLogger(HBaseReplicationEndpoint.class); private ZKWatcher zkw = null; + private final Object zkwLock = new Object(); protected Configuration conf; - protected AsyncClusterConnection conn; + private AsyncClusterConnection conn; /** * Default maximum number of times a replication sink can be reported as bad before @@ -103,10 +104,6 @@ protected AsyncClusterConnection 
createConnection(Configuration conf) throws IOE public void init(Context context) throws IOException { super.init(context); this.conf = HBaseConfiguration.create(ctx.getConfiguration()); - // TODO: This connection is replication specific or we should make it particular to - // replication and make replication specific settings such as compression or codec to use - // passing Cells. - this.conn = createConnection(this.conf); this.ratio = ctx.getConfiguration().getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); this.badSinkThreshold = @@ -114,9 +111,19 @@ public void init(Context context) throws IOException { this.badReportCounts = Maps.newHashMap(); } - protected synchronized void disconnect() { - if (zkw != null) { - zkw.close(); + protected void disconnect() { + synchronized (zkwLock) { + if (zkw != null) { + zkw.close(); + } + } + if (this.conn != null) { + try { + this.conn.close(); + this.conn = null; + } catch (IOException e) { + LOG.warn("{} Failed to close the connection", ctx.getPeerId()); + } } } @@ -128,11 +135,11 @@ private void reconnect(KeeperException ke) { if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException || ke instanceof AuthFailedException) { String clusterKey = ctx.getPeerConfig().getClusterKey(); - LOG.warn("Lost the ZooKeeper connection for peer " + clusterKey, ke); + LOG.warn("Lost the ZooKeeper connection for peer {}", clusterKey, ke); try { reloadZkWatcher(); } catch (IOException io) { - LOG.warn("Creation of ZookeeperWatcher failed for peer " + clusterKey, io); + LOG.warn("Creation of ZookeeperWatcher failed for peer {}", clusterKey, io); } } } @@ -151,6 +158,7 @@ public void stop() { protected void doStart() { try { reloadZkWatcher(); + connectPeerCluster(); notifyStarted(); } catch (IOException e) { notifyFailed(e); @@ -168,10 +176,12 @@ protected void doStop() { // limit connections when multiple replication sources try to connect to // the peer cluster. If the peer cluster is down we can get out of control // over time. 
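// A minimal standalone sketch of the lock-object pattern the zkwLock change
// above applies: guard only the mutable field with a private monitor, so
// swapping or closing the resource never contends with other callers of
// 'this'. ResourceHolder and its members are illustrative names, not HBase
// API.
class ResourceHolder {
  private final Object lock = new Object();
  private AutoCloseable resource;

  void reload(AutoCloseable fresh) throws Exception {
    synchronized (lock) {
      if (resource != null) {
        resource.close(); // close the old instance before swapping it out
      }
      resource = fresh;
    }
  }

  void close() throws Exception {
    synchronized (lock) {
      if (resource != null) {
        resource.close();
        resource = null;
      }
    }
  }
}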
- public synchronized UUID getPeerUUID() { + public UUID getPeerUUID() { UUID peerUUID = null; try { - peerUUID = ZKClusterId.getUUIDForCluster(zkw); + synchronized (zkwLock) { + peerUUID = ZKClusterId.getUUIDForCluster(zkw); + } } catch (KeeperException ke) { reconnect(ke); } @@ -182,13 +192,24 @@ public synchronized UUID getPeerUUID() { * Closes the current ZKW (if not null) and creates a new one * @throws IOException If anything goes wrong connecting */ - private synchronized void reloadZkWatcher() throws IOException { - if (zkw != null) { - zkw.close(); + private void reloadZkWatcher() throws IOException { + synchronized (zkwLock) { + if (zkw != null) { + zkw.close(); + } + zkw = new ZKWatcher(ctx.getConfiguration(), + "connection to cluster: " + ctx.getPeerId(), this); + zkw.registerListener(new PeerRegionServerListener(this)); + } + } + + private void connectPeerCluster() throws IOException { + try { + conn = createConnection(this.conf); + } catch (IOException ioe) { + LOG.warn("{} Failed to create connection for peer cluster", ctx.getPeerId(), ioe); + throw ioe; } - zkw = new ZKWatcher(ctx.getConfiguration(), - "connection to cluster: " + ctx.getPeerId(), this); - zkw.registerListener(new PeerRegionServerListener(this)); } @Override @@ -211,7 +232,9 @@ public boolean isAborted() { protected List fetchSlavesAddresses() { List children = null; try { - children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); + synchronized (zkwLock) { + children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); + } } catch (KeeperException ke) { if (LOG.isDebugEnabled()) { LOG.debug("Fetch slaves addresses failed", ke); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index b6e1f69173fe..b127b467505d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -44,14 +44,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; -import org.apache.hadoop.hbase.client.ClusterConnectionFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -59,7 +56,6 @@ import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Threads; @@ -127,7 +123,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi @Override public void init(Context context) throws IOException { super.init(context); - this.conf = 
HBaseConfiguration.create(ctx.getConfiguration()); decorateConf(); this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", @@ -139,10 +134,6 @@ public void init(Context context) throws IOException { DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); this.maxTerminationWait = maxTerminationWaitMultiplier * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - // TODO: This connection is replication specific or we should make it particular to - // replication and make replication specific settings such as compression or codec to use - // passing Cells. - this.conn = createConnection(this.conf); this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); @@ -412,19 +403,6 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt return entryList; } - private void reconnectToPeerCluster() { - AsyncClusterConnection connection = null; - try { - connection = - ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent()); - } catch (IOException ioe) { - LOG.warn("{} Failed to create connection for peer cluster", logPeerId(), ioe); - } - if (connection != null) { - this.conn = connection; - } - } - private long parallelReplicate(CompletionService pool, ReplicateContext replicateContext, List> batches) throws IOException { int futures = 0; @@ -504,9 +482,6 @@ public boolean replicate(ReplicateContext replicateContext) { } continue; } - if (this.conn == null) { - reconnectToPeerCluster(); - } try { // replicate the batches to sink side. parallelReplicate(pool, replicateContext, batches); @@ -564,14 +539,6 @@ protected boolean isPeerEnabled() { @Override protected void doStop() { disconnect(); // don't call super.doStop() - if (this.conn != null) { - try { - this.conn.close(); - this.conn = null; - } catch (IOException e) { - LOG.warn("{} Failed to close the connection", logPeerId()); - } - } // Allow currently running replication tasks to finish exec.shutdown(); try { From 3aa612f0fe410dcfc3cfb5a9c7327ef135355bc5 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 29 Sep 2020 15:25:05 +0800 Subject: [PATCH 085/769] Revert "HBASE-25017 Migrate flaky reporting jenkins job from Hadoop to hbase (#2466)" This reverts commit fbef545989f0824fc948d723a885a4ce13a26b7b. 
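On the HBASE-25100 hunks above, the net effect is single ownership of the peer connection: the base endpoint opens it once at start and closes it in disconnect(), and the subclass no longer rebuilds conf or conn in init(). A minimal standalone sketch of that ownership shape (Endpoint and openConnection are illustrative names, not the HBase types):

    abstract class Endpoint {
      // Owned by the base class only; subclasses consume it, never re-create it.
      private AutoCloseable conn;

      protected void doStart() throws Exception {
        conn = openConnection(); // the single creation point
      }

      protected void disconnect() throws Exception {
        if (conn != null) {
          conn.close();
          conn = null;
        }
      }

      protected abstract AutoCloseable openConnection() throws Exception;
    }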
--- dev-support/flaky-tests/flaky-reporting.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 640b1cb54a77..8a56c0bdb0cc 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'hbase' + label 'Hadoop' } } triggers { From 9d6af969acf93cce6ff851f0c66c4ff16db84df5 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 28 Sep 2020 20:01:43 +0800 Subject: [PATCH 086/769] HBASE-25107 Migrate flaky reporting jenkins job from Hadoop to hbase (#2466) Signed-off-by: Guanghao Zhang --- dev-support/flaky-tests/flaky-reporting.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 8a56c0bdb0cc..640b1cb54a77 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { From 5d926627ae7692bb46e4febaf04a14f2fdd30a30 Mon Sep 17 00:00:00 2001 From: bsglz <18031031@qq.com> Date: Tue, 29 Sep 2020 15:36:03 +0800 Subject: [PATCH 087/769] =?UTF-8?q?HBASE-24967=20The=20table.jsp=20cost=20?= =?UTF-8?q?long=20time=20to=20load=20if=20the=20table=20include=E2=80=A6?= =?UTF-8?q?=20(#2326)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * HBASE-24967 The table.jsp cost long time to load if the table include closed regions * fix it by another way * fix review issue * fix checkstyle warnings * fix checkstyle warning --- .../apache/hadoop/hbase/RegionMetrics.java | 6 ++ .../hadoop/hbase/RegionMetricsBuilder.java | 25 +++++++- .../hbase/shaded/protobuf/ProtobufUtil.java | 17 +++++ .../main/protobuf/server/ClusterStatus.proto | 10 +++ .../apache/hadoop/hbase/master/HMaster.java | 54 ++++++++++++++-- .../hbase/regionserver/HRegionServer.java | 2 +- .../resources/hbase-webapps/master/table.jsp | 18 +----- .../master/TestRegionsRecoveryChore.java | 6 ++ .../regionserver/TestCompactionState.java | 63 +++++++++++++++---- 9 files changed, 166 insertions(+), 35 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 7d732607ae36..8cd3ea156c4d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase; import java.util.Map; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -182,4 +183,9 @@ default String getNameAsString() { * @return the block total weight of this region */ long getBlocksTotalWeight(); + + /** + * @return the compaction state of this region + */ + CompactionState getCompactionState(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java index c3839662ac27..8349c35d7d33 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -24,12 +24,14 
@@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -58,6 +60,8 @@ public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regio .setBlocksLocalWithSsdWeight(regionLoadPB.hasBlocksLocalWithSsdWeight() ? regionLoadPB.getBlocksLocalWithSsdWeight() : 0) .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) + .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad( + regionLoadPB.getCompactionState())) .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE)) @@ -159,6 +163,7 @@ public static RegionMetricsBuilder newBuilder(byte[] name) { private long blocksLocalWeight; private long blocksLocalWithSsdWeight; private long blocksTotalWeight; + private CompactionState compactionState; private RegionMetricsBuilder(byte[] name) { this.name = name; } @@ -263,6 +268,11 @@ public RegionMetricsBuilder setBlocksTotalWeight(long value) { this.blocksTotalWeight = value; return this; } + public RegionMetricsBuilder setCompactionState(CompactionState compactionState) { + this.compactionState = compactionState; + return this; + } + public RegionMetrics build() { return new RegionMetricsImpl(name, storeCount, @@ -289,7 +299,8 @@ public RegionMetrics build() { dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight, - blocksTotalWeight); + blocksTotalWeight, + compactionState); } private static class RegionMetricsImpl implements RegionMetrics { @@ -319,6 +330,7 @@ private static class RegionMetricsImpl implements RegionMetrics { private final long blocksLocalWeight; private final long blocksLocalWithSsdWeight; private final long blocksTotalWeight; + private final CompactionState compactionState; RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, @@ -344,7 +356,8 @@ private static class RegionMetricsImpl implements RegionMetrics { float dataLocalityForSsd, long blocksLocalWeight, long blocksLocalWithSsdWeight, - long blocksTotalWeight) { + long blocksTotalWeight, + CompactionState compactionState) { this.name = Preconditions.checkNotNull(name); this.storeCount = storeCount; this.storeFileCount = storeFileCount; @@ -371,6 +384,7 @@ private static class RegionMetricsImpl implements RegionMetrics { this.blocksLocalWeight = blocksLocalWeight; this.blocksLocalWithSsdWeight = blocksLocalWithSsdWeight; this.blocksTotalWeight = blocksTotalWeight; + this.compactionState = compactionState; } @Override @@ -503,6 +517,11 @@ public long getBlocksTotalWeight() { return blocksTotalWeight; } + @Override + public CompactionState getCompactionState() { + return compactionState; + } + @Override public String toString() { StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount", @@ -562,6 +581,8 @@ public String toString() { blocksLocalWithSsdWeight); Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight); + 
Strings.appendKeyValue(sb, "compactionState", + compactionState); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d2217c65dd03..d5fdb89302c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2960,6 +2960,23 @@ public static GetRegionInfoResponse.CompactionState createCompactionState(Compac return GetRegionInfoResponse.CompactionState.valueOf(state.toString()); } + /** + * Creates {@link CompactionState} from + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos + * .RegionLoad.CompactionState} state + * @param state the protobuf CompactionState + * @return CompactionState + */ + public static CompactionState createCompactionStateForRegionLoad( + RegionLoad.CompactionState state) { + return CompactionState.valueOf(state.toString()); + } + + public static RegionLoad.CompactionState createCompactionStateForRegionLoad( + CompactionState state) { + return RegionLoad.CompactionState.valueOf(state.toString()); + } + public static Optional toOptionalTimestamp(MajorCompactionTimestampResponse resp) { long timestamp = resp.getCompactionTimestamp(); return timestamp == 0 ? Optional.empty() : Optional.of(timestamp); diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto index 0c8e89d185d8..35f3c2d054b5 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto @@ -167,6 +167,16 @@ message RegionLoad { /** The current blocks total weight for region in the regionserver */ optional uint64 blocks_total_weight = 26; + + /** The compaction state for region */ + optional CompactionState compaction_state = 27; + + enum CompactionState { + NONE = 0; + MINOR = 1; + MAJOR = 2; + MAJOR_AND_MINOR = 3; + } } message UserLoad { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index e4bd3c5fce22..cf43c8b814c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -75,13 +75,16 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PleaseHoldException; +import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; @@ -237,7 +240,7 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** @@ -3445,12 +3448,12 @@ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws I * @param tableName The current table name. * @return If a given table is in mob file compaction now. */ - public CompactionState getMobCompactionState(TableName tableName) { + public GetRegionInfoResponse.CompactionState getMobCompactionState(TableName tableName) { AtomicInteger compactionsCount = mobCompactionStates.get(tableName); if (compactionsCount != null && compactionsCount.get() != 0) { - return CompactionState.MAJOR_AND_MINOR; + return GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR; } - return CompactionState.NONE; + return GetRegionInfoResponse.CompactionState.NONE; } public void reportMobCompactionStart(TableName tableName) throws IOException { @@ -3900,4 +3903,47 @@ public MetaRegionLocationCache getMetaRegionLocationCache() { public RSGroupInfoManager getRSGroupInfoManager() { return rsGroupInfoManager; } + + /** + * Get the compaction state of the table + * + * @param tableName The table name + * @return CompactionState Compaction state of the table + */ + public CompactionState getCompactionState(final TableName tableName) { + CompactionState compactionState = CompactionState.NONE; + try { + List regions = + assignmentManager.getRegionStates().getRegionsOfTable(tableName, false); + for (RegionInfo regionInfo : regions) { + ServerName serverName = + assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo); + if (serverName == null) { + continue; + } + ServerMetrics sl = serverManager.getLoad(serverName); + if (sl == null) { + continue; + } + RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName()); + if (regionMetrics.getCompactionState() == CompactionState.MAJOR) { + if (compactionState == CompactionState.MINOR) { + compactionState = CompactionState.MAJOR_AND_MINOR; + } else { + compactionState = CompactionState.MAJOR; + } + } else if (regionMetrics.getCompactionState() == CompactionState.MINOR) { + if (compactionState == CompactionState.MAJOR) { + compactionState = CompactionState.MAJOR_AND_MINOR; + } else { + compactionState = CompactionState.MINOR; + } + } + } + } catch (Exception e) { + compactionState = null; + LOG.error("Exception when get compaction state for " + tableName.getNameAsString(), e); + } + return compactionState; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index cd90fb87d9a2..d6eb45fe65e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1758,9 +1758,9 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, .setBlocksLocalWeight(blocksLocalWeight) .setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight) .setBlocksTotalWeight(blocksTotalWeight) + .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState())) .setLastMajorCompactionTs(r.getOldestHfileTs(true)); r.setCompleteSequenceId(regionLoadBldr); - return regionLoadBldr.build(); } diff --git 
a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index e46b2778546d..23eeb3ab740f 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -30,6 +30,7 @@ import="java.util.HashSet" import="java.util.Optional" import="java.util.TreeMap" + import="java.util.concurrent.TimeoutException" import="java.util.concurrent.TimeUnit" import="org.apache.commons.lang3.StringEscapeUtils" import="org.apache.hadoop.conf.Configuration" @@ -654,21 +655,8 @@ <% if (master.getAssignmentManager().isTableEnabled(table.getName())) { - try { - CompactionState compactionState = admin.getCompactionState(table.getName()).get(); - %><%= compactionState %><% - } catch (Exception e) { - - if(e.getCause() != null && e.getCause().getCause() instanceof NotServingRegionException) { - %><%= CompactionState.NONE %><% - } else { - // Nothing really to do here - for(StackTraceElement element : e.getStackTrace()) { - %><%= StringEscapeUtils.escapeHtml4(element.toString()) %><% - } - %> Unknown <% - } - } + CompactionState compactionState = master.getCompactionState(table.getName()); + %><%= compactionState==null?"UNKNOWN":compactionState %><% } else { %><%= CompactionState.NONE %><% } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java index 50361edd6d8d..2208f5a8107a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UserMetrics; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; @@ -512,6 +513,11 @@ public long getBlocksLocalWithSsdWeight() { public long getBlocksTotalWeight() { return 0; } + + @Override + public CompactionState getCompactionState() { + return null; + } }; return regionMetrics; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java index 599db542ff41..39171da02fae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -69,24 +70,50 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + enum StateSource { + ADMIN, MASTER + } + + @Test + public void testMajorCompactionStateFromAdmin() throws IOException, InterruptedException { + compaction(name.getMethodName(), 8, CompactionState.MAJOR, false, StateSource.ADMIN); + } + + @Test + public void 
testMinorCompactionStateFromAdmin() throws IOException, InterruptedException { + compaction(name.getMethodName(), 15, CompactionState.MINOR, false, StateSource.ADMIN); + } + + @Test + public void testMajorCompactionOnFamilyStateFromAdmin() throws IOException, InterruptedException { + compaction(name.getMethodName(), 8, CompactionState.MAJOR, true, StateSource.ADMIN); + } + @Test - public void testMajorCompaction() throws IOException, InterruptedException { - compaction(name.getMethodName(), 8, CompactionState.MAJOR, false); + public void testMinorCompactionOnFamilyStateFromAdmin() throws IOException, InterruptedException { + compaction(name.getMethodName(), 15, CompactionState.MINOR, true, StateSource.ADMIN); } @Test - public void testMinorCompaction() throws IOException, InterruptedException { - compaction(name.getMethodName(), 15, CompactionState.MINOR, false); + public void testMajorCompactionStateFromMaster() throws IOException, InterruptedException { + compaction(name.getMethodName(), 8, CompactionState.MAJOR, false, StateSource.MASTER); } @Test - public void testMajorCompactionOnFamily() throws IOException, InterruptedException { - compaction(name.getMethodName(), 8, CompactionState.MAJOR, true); + public void testMinorCompactionStateFromMaster() throws IOException, InterruptedException { + compaction(name.getMethodName(), 15, CompactionState.MINOR, false, StateSource.MASTER); } @Test - public void testMinorCompactionOnFamily() throws IOException, InterruptedException { - compaction(name.getMethodName(), 15, CompactionState.MINOR, true); + public void testMajorCompactionOnFamilyStateFromMaster() + throws IOException, InterruptedException { + compaction(name.getMethodName(), 8, CompactionState.MAJOR, true, StateSource.MASTER); + } + + @Test + public void testMinorCompactionOnFamilyStateFromMaster() + throws IOException, InterruptedException { + compaction(name.getMethodName(), 15, CompactionState.MINOR, true, StateSource.MASTER); } @Test @@ -127,11 +154,12 @@ public void testInvalidColumnFamily() throws IOException, InterruptedException { * @param flushes * @param expectedState * @param singleFamily otherwise, run compaction on all cfs + * @param stateSource get the state by Admin or Master * @throws IOException * @throws InterruptedException */ private void compaction(final String tableName, final int flushes, - final CompactionState expectedState, boolean singleFamily) + final CompactionState expectedState, boolean singleFamily, StateSource stateSource) throws IOException, InterruptedException { // Create a table with regions TableName table = TableName.valueOf(tableName); @@ -143,6 +171,7 @@ private void compaction(final String tableName, final int flushes, ht = TEST_UTIL.createTable(table, families); loadData(ht, families, 3000, flushes); HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); + HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); List regions = rs.getRegions(table); int countBefore = countStoreFilesInFamilies(regions, families); int countBeforeSingleFamily = countStoreFilesInFamily(regions, family); @@ -164,10 +193,10 @@ private void compaction(final String tableName, final int flushes, long curt = System.currentTimeMillis(); long waitTime = 5000; long endt = curt + waitTime; - CompactionState state = admin.getCompactionState(table); + CompactionState state = getCompactionState(stateSource, master, admin, table); while (state == CompactionState.NONE && curt < endt) { Thread.sleep(10); - state = admin.getCompactionState(table); + state = 
getCompactionState(stateSource, master, admin, table); curt = System.currentTimeMillis(); } // Now, should have the right compaction state, @@ -179,10 +208,10 @@ private void compaction(final String tableName, final int flushes, } } else { // Wait until the compaction is done - state = admin.getCompactionState(table); + state = getCompactionState(stateSource, master, admin, table); while (state != CompactionState.NONE && curt < endt) { Thread.sleep(10); - state = admin.getCompactionState(table); + state = getCompactionState(stateSource, master, admin, table); } // Now, compaction should be done. assertEquals(CompactionState.NONE, state); @@ -210,6 +239,14 @@ private void compaction(final String tableName, final int flushes, } } + private static CompactionState getCompactionState(StateSource stateSource, HMaster master, + Admin admin, TableName table) throws IOException { + CompactionState state = stateSource == StateSource.ADMIN ? + admin.getCompactionState(table) : + master.getCompactionState(table); + return state; + } + private static int countStoreFilesInFamily( List regions, final byte[] family) { return countStoreFilesInFamilies(regions, new byte[][]{family}); From b268b1f621388469094439714fb3aaa86f34da22 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 29 Sep 2020 15:48:57 +0800 Subject: [PATCH 088/769] HBASE-25103 Remove ZNodePaths.metaReplicaZNodes (#2464) Signed-off-by: Huaxiang Sun --- .../hbase/client/ZKConnectionRegistry.java | 2 +- .../hadoop/hbase/zookeeper/ZNodePaths.java | 68 ++++++------------- .../hbase/master/MasterMetaBootstrap.java | 2 +- .../hbase/master/MetaRegionLocationCache.java | 6 +- .../hbase/master/zksyncer/ClientZKSyncer.java | 2 +- .../master/zksyncer/MetaLocationSyncer.java | 12 ++-- .../apache/hadoop/hbase/zookeeper/ZKUtil.java | 10 +-- 7 files changed, 38 insertions(+), 64 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 42a418859f18..f1f052138637 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -141,7 +141,7 @@ private void getMetaRegionLocation(CompletableFuture future, HRegionLocation[] locs = new HRegionLocation[metaReplicaZNodes.size()]; MutableInt remaining = new MutableInt(locs.length); for (String metaReplicaZNode : metaReplicaZNodes) { - int replicaId = znodePaths.getMetaReplicaIdFromZnode(metaReplicaZNode); + int replicaId = znodePaths.getMetaReplicaIdFromZNode(metaReplicaZNode); String path = ZNodePaths.joinZNode(znodePaths.baseZNode, metaReplicaZNode); if (replicaId == DEFAULT_REPLICA_ID) { addListener(getAndConvert(path, ZKConnectionRegistry::getMetaProto), (proto, error) -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index 5c49808807ff..a0065a9e9cbf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -17,22 +17,15 @@ */ package org.apache.hadoop.hbase.zookeeper; -import static org.apache.hadoop.hbase.HConstants.DEFAULT_META_REPLICA_NUM; import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; -import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; import static 
org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME; import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; -import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; -import java.util.Collection; -import java.util.Optional; -import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; - /** * Class that hold all the paths of znode for HBase. */ @@ -55,11 +48,6 @@ public class ZNodePaths { */ private final String metaZNodePrefix; - /** - * znodes containing the locations of the servers hosting the meta replicas - */ - private final ImmutableMap metaReplicaZNodes; - // znode containing ephemeral nodes of the regionservers public final String rsZNode; // znode containing ephemeral nodes of the draining regionservers @@ -104,14 +92,7 @@ public class ZNodePaths { public ZNodePaths(Configuration conf) { baseZNode = conf.get(ZOOKEEPER_ZNODE_PARENT, DEFAULT_ZOOKEEPER_ZNODE_PARENT); - ImmutableMap.Builder builder = ImmutableMap.builder(); metaZNodePrefix = conf.get(META_ZNODE_PREFIX_CONF_KEY, META_ZNODE_PREFIX); - String defaultMetaReplicaZNode = ZNodePaths.joinZNode(baseZNode, metaZNodePrefix); - builder.put(DEFAULT_REPLICA_ID, defaultMetaReplicaZNode); - int numMetaReplicas = conf.getInt(META_REPLICAS_NUM, DEFAULT_META_REPLICA_NUM); - IntStream.range(1, numMetaReplicas) - .forEachOrdered(i -> builder.put(i, defaultMetaReplicaZNode + "-" + i)); - metaReplicaZNodes = builder.build(); rsZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.rs", "rs")); drainingZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining")); masterAddressZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master")); @@ -142,7 +123,6 @@ public ZNodePaths(Configuration conf) { public String toString() { return new StringBuilder() .append("ZNodePaths [baseZNode=").append(baseZNode) - .append(", metaReplicaZNodes=").append(metaReplicaZNodes) .append(", rsZNode=").append(rsZNode) .append(", drainingZNode=").append(drainingZNode) .append(", masterAddressZNode=").append(masterAddressZNode) @@ -164,29 +144,15 @@ public String toString() { .append("]").toString(); } - /** - * @return true if the znode is a meta region replica - */ - public boolean isAnyMetaReplicaZNode(String node) { - return this.metaReplicaZNodes.containsValue(node); - } - - /** - * @return Meta Replica ZNodes - */ - public Collection getMetaReplicaZNodes() { - return this.metaReplicaZNodes.values(); - } - /** * @return the znode string corresponding to a replicaId */ public String getZNodeForReplica(int replicaId) { - // return a newly created path but don't update the cache of paths - // This is mostly needed for tests that attempt to create meta replicas - // from outside the master - return Optional.ofNullable(metaReplicaZNodes.get(replicaId)) - .orElseGet(() -> metaReplicaZNodes.get(DEFAULT_REPLICA_ID) + "-" + replicaId); + if (RegionReplicaUtil.isDefaultReplica(replicaId)) { + return joinZNode(baseZNode, metaZNodePrefix); + } else { + return joinZNode(baseZNode, metaZNodePrefix + "-" + replicaId); + } } /** @@ -198,7 +164,7 @@ public int getMetaReplicaIdFromPath(String path) { // Extract the znode from path. The prefix is of the following format. // baseZNode + PATH_SEPARATOR. 
int prefixLen = baseZNode.length() + 1; - return getMetaReplicaIdFromZnode(path.substring(prefixLen)); + return getMetaReplicaIdFromZNode(path.substring(prefixLen)); } /** @@ -206,7 +172,7 @@ public int getMetaReplicaIdFromPath(String path) { * @param znode the name of the znode, does not include baseZNode * @return replicaId */ - public int getMetaReplicaIdFromZnode(String znode) { + public int getMetaReplicaIdFromZNode(String znode) { return znode.equals(metaZNodePrefix)? RegionInfo.DEFAULT_REPLICA_ID: Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); @@ -220,17 +186,25 @@ public boolean isMetaZNodePrefix(String znode) { } /** - * Returns whether the znode is supposed to be readable by the client and DOES NOT contain + * @return True is the fully qualified path is for meta location + */ + public boolean isMetaZNodePath(String path) { + int prefixLen = baseZNode.length() + 1; + return path.length() > prefixLen && isMetaZNodePrefix(path.substring(prefixLen)); + } + + /** + * Returns whether the path is supposed to be readable by the client and DOES NOT contain * sensitive information (world readable). */ - public boolean isClientReadable(String node) { + public boolean isClientReadable(String path) { // Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS // all clients need to access this data to work. Using zk for sharing data to clients (other // than service lookup case is not a recommended design pattern. - return node.equals(baseZNode) || isAnyMetaReplicaZNode(node) || - node.equals(masterAddressZNode) || node.equals(clusterIdZNode) || node.equals(rsZNode) || + return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) || + path.equals(clusterIdZNode) || path.equals(rsZNode) || // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not - node.equals(tableZNode) || node.startsWith(tableZNode + "/"); + path.equals(tableZNode) || path.startsWith(tableZNode + "/"); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java index c676df8b6c88..0b3476fc9dd5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java @@ -92,7 +92,7 @@ private void unassignExcessMetaReplica(int numMetaReplicasConfigured) { try { List metaReplicaZnodes = zooKeeper.getMetaReplicaNodes(); for (String metaReplicaZnode : metaReplicaZnodes) { - int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZnode(metaReplicaZnode); + int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaZnode); if (replicaId >= numMetaReplicasConfigured) { RegionState r = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId); LOG.info("Closing excess replica of meta region " + r.getRegion()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java index f4e91b56051d..07512d16fd60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java @@ -157,7 +157,7 @@ private HRegionLocation getMetaRegionLocation(int replicaId) } private void updateMetaLocation(String path, ZNodeOpType opType) { - if 
(!isValidMetaZNode(path)) { + if (!isValidMetaPath(path)) { return; } LOG.debug("Updating meta znode for path {}: {}", path, opType.name()); @@ -220,8 +220,8 @@ public Optional> getMetaRegionLocations() { * Helper to check if the given 'path' corresponds to a meta znode. This listener is only * interested in changes to meta znodes. */ - private boolean isValidMetaZNode(String path) { - return watcher.getZNodePaths().isAnyMetaReplicaZNode(path); + private boolean isValidMetaPath(String path) { + return watcher.getZNodePaths().isMetaZNodePath(path); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java index b1c70c569356..38dc11218687 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java @@ -207,7 +207,7 @@ public synchronized void nodeDeleted(String path) { /** * @return the znode(s) to watch */ - abstract Collection getNodesToWatch(); + abstract Collection getNodesToWatch() throws KeeperException; /** * Thread to synchronize znode data to client ZK cluster diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java index 98d73224ce9b..dca5cadf8adf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java @@ -19,10 +19,12 @@ package org.apache.hadoop.hbase.master.zksyncer; import java.util.Collection; - +import java.util.stream.Collectors; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.KeeperException; /** * Tracks the meta region locations on server ZK cluster and synchronize them to client ZK cluster @@ -36,11 +38,13 @@ public MetaLocationSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server s @Override boolean validate(String path) { - return watcher.getZNodePaths().isAnyMetaReplicaZNode(path); + return watcher.getZNodePaths().isMetaZNodePath(path); } @Override - Collection getNodesToWatch() { - return watcher.getZNodePaths().getMetaReplicaZNodes(); + Collection getNodesToWatch() throws KeeperException { + return watcher.getMetaReplicaNodes().stream() + .map(znode -> ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode)) + .collect(Collectors.toList()); } } \ No newline at end of file diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 19d11d0704fc..45732d2efddf 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -37,14 +37,11 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; -import 
org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.security.Superusers; @@ -78,6 +75,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; @@ -1861,9 +1859,7 @@ public static String dump(ZKWatcher zkw) { } sb.append("\nRegion server holding hbase:meta: " + MetaTableLocator.getMetaRegionLocation(zkw)); - Configuration conf = HBaseConfiguration.create(); - int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); + int numMetaReplicas = zkw.getMetaReplicaNodes().size(); for (int i = 1; i < numMetaReplicas; i++) { sb.append("\nRegion server holding hbase:meta, replicaId " + i + " " + MetaTableLocator.getMetaRegionLocation(zkw, i)); @@ -2109,7 +2105,7 @@ private static void logRetrievedMsg(final ZKWatcher zkw, " byte(s) of data from znode " + znode + (watcherSet? " and set watcher; ": "; data=") + (data == null? "null": data.length == 0? "empty": ( - zkw.getZNodePaths().isMetaZNodePrefix(znode)? + zkw.getZNodePaths().isMetaZNodePath(znode)? getServerNameOrEmptyString(data): znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)? getServerNameOrEmptyString(data): From 9fc29c4cbf6671589f34b15e4ab41970bd4f3b45 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Tue, 29 Sep 2020 10:00:57 +0100 Subject: [PATCH 089/769] HBASE-24877 addendum: additional checks to avoid one extra possible race control in the initialize loop (#2400) Signed-off-by: Duo Zhang Signed-off-by: Josh Elser --- .../regionserver/ReplicationSource.java | 58 +++++++------ .../regionserver/TestReplicationSource.java | 84 ++++++++++++------- 2 files changed, 88 insertions(+), 54 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index bf8127f93abb..82120736bd42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -39,7 +39,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Predicate; import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -128,7 +127,9 @@ public class ReplicationSource implements ReplicationSourceInterface { //so that it doesn't try submit another initialize thread. //NOTE: this should only be set to false at the end of initialize method, prior to return. private AtomicBoolean startupOngoing = new AtomicBoolean(false); - + //Flag that signalizes uncaught error happening while starting up the source + // and a retry should be attempted + private AtomicBoolean retryStartup = new AtomicBoolean(false); /** * A filter (or a chain of filters) for WAL entries; filters out edits. 
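The retryStartup flag added above is an AtomicBoolean, replacing the commons-lang MutableBoolean whose import this patch removes: the flag is now written from the init thread's exception handler and read in a loop, so it needs a cross-thread visibility guarantee. A minimal sketch of why the atomic flag matters (Flag is an illustrative class, not part of the patch):

    import java.util.concurrent.atomic.AtomicBoolean;

    class Flag {
      // A plain boolean field here would give no happens-before edge
      // between writer and reader threads; AtomicBoolean does.
      private final AtomicBoolean done = new AtomicBoolean(false);

      void finish() {                // set on a worker thread
        done.set(true);
      }

      void await() throws InterruptedException {
        while (!done.get()) {        // polled from another thread
          Thread.sleep(10);
        }
      }
    }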
@@ -375,7 +376,7 @@ private void tryStartNewShipper(String walGroupId, PriorityBlockingQueue q LOG.debug("{} preempted start of worker walGroupId={}", logPeerId(), walGroupId); return value; } else { - LOG.debug("{} starting worker for walGroupId={}", logPeerId(), walGroupId); + LOG.debug("{} starting worker for walGroupId={}", logPeerId(), walGroupId); ReplicationSourceShipper worker = createNewShipper(walGroupId, queue); ReplicationSourceWALReader walReader = createNewWALReader(walGroupId, queue, worker.getStartPosition()); @@ -570,6 +571,7 @@ private void initialize() { if (sleepForRetries("Error starting ReplicationEndpoint", sleepMultiplier)) { sleepMultiplier++; } else { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new RuntimeException("Exhausted retries to start replication endpoint."); } @@ -577,6 +579,7 @@ private void initialize() { } if (!this.isSourceActive()) { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new IllegalStateException("Source should be active."); } @@ -600,6 +603,7 @@ private void initialize() { } if(!this.isSourceActive()) { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new IllegalStateException("Source should be active."); } @@ -618,28 +622,34 @@ private void initialize() { @Override public void startup() { - if (this.sourceRunning) { - return; - } + // mark we are running now this.sourceRunning = true; - //Flag that signalizes uncaught error happening while starting up the source - // and a retry should be attempted - MutableBoolean retryStartup = new MutableBoolean(true); - do { - if(retryStartup.booleanValue()) { - retryStartup.setValue(false); - startupOngoing.set(true); - // mark we are running now - initThread = new Thread(this::initialize); - Threads.setDaemonThreadRunning(initThread, - Thread.currentThread().getName() + ".replicationSource," + this.queueId, - (t,e) -> { - sourceRunning = false; - uncaughtException(t, e, null, null); - retryStartup.setValue(!this.abortOnError); - }); - } - } while (this.startupOngoing.get() && !this.abortOnError); + startupOngoing.set(true); + initThread = new Thread(this::initialize); + Threads.setDaemonThreadRunning(initThread, + Thread.currentThread().getName() + ".replicationSource," + this.queueId, + (t,e) -> { + //if first initialization attempt failed, and abortOnError is false, we will + //keep looping in this thread until initialize eventually succeeds, + //while the server main startup one can go on with its work. 
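The added lines that follow implement what the comment above promises: retries re-enter initialize() on the initializer thread, inside its uncaught-exception handler, so the caller of startup() is never blocked by a source that cannot start. The same shape in a standalone sketch, with logging and metrics elided (RetryingStartup is an illustrative class, not the HBase one):

    import java.util.concurrent.atomic.AtomicBoolean;

    class RetryingStartup {
      private final boolean abortOnError;
      private final AtomicBoolean retry = new AtomicBoolean(false);

      RetryingStartup(boolean abortOnError) {
        this.abortOnError = abortOnError;
      }

      void initialize() {
        // real work; may throw, e.g. on a bad peer endpoint
      }

      void startup() {
        Thread init = new Thread(this::initialize);
        // Failures re-enter initialize() here, on the init thread,
        // never on the thread that called startup().
        init.setUncaughtExceptionHandler((t, e) -> {
          retry.set(!abortOnError);
          while (retry.get()) {
            retry.set(false);
            try {
              initialize();
            } catch (Throwable err) {
              retry.set(!abortOnError); // loop again unless aborting
            }
          }
        });
        init.setDaemon(true);
        init.start();
      }
    }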
+        sourceRunning = false;
+        uncaughtException(t, e, null, null);
+        retryStartup.set(!this.abortOnError);
+        do {
+          if(retryStartup.get()) {
+            this.sourceRunning = true;
+            startupOngoing.set(true);
+            retryStartup.set(false);
+            try {
+              initialize();
+            } catch(Throwable error){
+              sourceRunning = false;
+              uncaughtException(t, error, null, null);
+              retryStartup.set(!this.abortOnError);
+            }
+          }
+        } while ((this.startupOngoing.get() || this.retryStartup.get()) && !this.abortOnError);
+      });
   }

   @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
index 8b8dcd8afa28..ded9e8f28e21 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
@@ -444,7 +444,7 @@ protected void doStop() {
   /**
    * Deadend Endpoint. Does nothing.
    */
-  public static class FaultyReplicationEndpoint extends DoNothingReplicationEndpoint {
+  public static class FlakyReplicationEndpoint extends DoNothingReplicationEndpoint {

     static int count = 0;

@@ -460,6 +460,17 @@ public synchronized UUID getPeerUUID() {

   }

+  public static class FaultyReplicationEndpoint extends DoNothingReplicationEndpoint {
+
+    static int count = 0;
+
+    @Override
+    public synchronized UUID getPeerUUID() {
+      throw new RuntimeException();
+    }
+
+  }
+
   /**
    * Test HBASE-20497
    * Moved here from TestReplicationSource because doesn't need cluster.
@@ -488,22 +499,16 @@ public void testRecoveredReplicationSourceShipperGetPosition() throws Exception
     assertEquals(1001L, shipper.getStartPosition());
   }

-  /**
-   * Test ReplicationSource retries startup once an uncaught exception happens
-   * during initialization and eplication.source.regionserver.abort is set to false.
-   */
-  @Test
-  public void testAbortFalseOnError() throws IOException {
-    ReplicationSource rs = new ReplicationSource();
-    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
+  private RegionServerServices setupForAbortTests(ReplicationSource rs, Configuration conf,
+      String endpointName) throws IOException {
     conf.setInt("replication.source.maxretriesmultiplier", 1);
-    conf.setBoolean("replication.source.regionserver.abort", false);
     ReplicationPeer mockPeer = Mockito.mock(ReplicationPeer.class);
     Mockito.when(mockPeer.getConfiguration()).thenReturn(conf);
     Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
     ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class);
+    FaultyReplicationEndpoint.count = 0;
     Mockito.when(peerConfig.getReplicationEndpointImpl()).
-      thenReturn(FaultyReplicationEndpoint.class.getName());
+      thenReturn(endpointName);
     Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
     ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
     Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
@@ -512,6 +517,20 @@ public void testAbortFalseOnError() throws IOException {
       TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
     rs.init(conf, null, manager, null, mockPeer, rss, queueId, null,
       p -> OptionalLong.empty(), new MetricsSource(queueId));
+    return rss;
+  }
+
+  /**
+   * Test ReplicationSource retries startup once an uncaught exception happens
+   * during initialization and replication.source.regionserver.abort is set to false.
+   */
+  @Test
+  public void testAbortFalseOnError() throws IOException {
+    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
+    conf.setBoolean("replication.source.regionserver.abort", false);
+    ReplicationSource rs = new ReplicationSource();
+    RegionServerServices rss = setupForAbortTests(rs, conf,
+      FlakyReplicationEndpoint.class.getName());
     try {
       rs.startup();
       assertTrue(rs.isSourceActive());
@@ -526,34 +545,39 @@ public void testAbortFalseOnError() throws IOException {
     }
   }

+  /**
+   * Test ReplicationSource keeps retrying startup indefinitely without blocking the main thread,
+   * when replication.source.regionserver.abort is set to false.
+   */
+  @Test
+  public void testAbortFalseOnErrorDoesntBlockMainThread() throws IOException {
+    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
+    ReplicationSource rs = new ReplicationSource();
+    RegionServerServices rss = setupForAbortTests(rs, conf,
+      FaultyReplicationEndpoint.class.getName());
+    try {
+      rs.startup();
+      assertTrue(true);
+    } finally {
+      rs.terminate("Done");
+      rss.stop("Done");
+    }
+  }
+
   /**
    * Test ReplicationSource retries startup once an uncaught exception happens
-   * during initialization and replication.source.regionserver.abort is set to false.
+   * during initialization and replication.source.regionserver.abort is set to true.
    */
   @Test
   public void testAbortTrueOnError() throws IOException {
-    ReplicationSource rs = new ReplicationSource();
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
-    conf.setInt("replication.source.maxretriesmultiplier", 1);
-    ReplicationPeer mockPeer = Mockito.mock(ReplicationPeer.class);
-    Mockito.when(mockPeer.getConfiguration()).thenReturn(conf);
-    Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
-    ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class);
-    Mockito.when(peerConfig.getReplicationEndpointImpl()).
-      thenReturn(FaultyReplicationEndpoint.class.getName());
-    Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
-    ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
-    Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
-    String queueId = "qid";
-    RegionServerServices rss =
-      TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
-    rs.init(conf, null, manager, null, mockPeer, rss, queueId, null,
-      p -> OptionalLong.empty(), new MetricsSource(queueId));
+    ReplicationSource rs = new ReplicationSource();
+    RegionServerServices rss = setupForAbortTests(rs, conf,
+      FlakyReplicationEndpoint.class.getName());
     try {
       rs.startup();
-      Waiter.waitFor(conf, 1000, () -> FaultyReplicationEndpoint.count > 0);
+      Waiter.waitFor(conf, 1000, () -> rss.isAborted());
       assertFalse(rs.isSourceActive());
-      assertTrue(rss.isAborted());
     } finally {
       rs.terminate("Done");
       rss.stop("Done");

From 01876071b990063899e0839cdda9745783fe814c Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Wed, 30 Sep 2020 09:34:15 -0700
Subject: [PATCH 090/769] HBASE-25109 Add MR Counters to WALPlayer; currently hard to tell if it is doing anything (#2468)

Add MR counters so operator can see if WALPlayer run actually did anything.

Fix bugs in usage (it enforced two args though usage describes allowing one arg only). Clean up usage output. In particular add note on wal file separator as hbase by default uses the ',' in its WAL file names which could befuddle operator trying to do simple import.
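For a concrete picture of the one-argument form this message describes, a driver along these lines should now work. This is a sketch: it assumes only WALPlayer's public no-arg constructor and the standard Hadoop ToolRunner pattern, and the WAL directory path is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.util.ToolRunner;

public class ReplayAllTables {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Single-argument form: just a WAL input directory, no table list.
    // "/hbase/oldWALs" is an illustrative path, not a required location.
    int exitCode = ToolRunner.run(conf, new WALPlayer(), new String[] { "/hbase/oldWALs" });
    System.exit(exitCode);
  }
}

With no table list given, every table found in the WALs is replayed, which is why the usage text below warns that hbase:meta entries are imported too if present.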
Signed-off-by: Huaxiang Sun --- .../apache/hadoop/hbase/mapreduce/Driver.java | 3 +- .../hbase/mapreduce/WALInputFormat.java | 5 +- .../hadoop/hbase/mapreduce/WALPlayer.java | 72 ++++++++++++------- .../hadoop/hbase/mapreduce/TestWALPlayer.java | 4 +- src/main/asciidoc/_chapters/ops_mgt.adoc | 44 ++++++------ 5 files changed, 75 insertions(+), 53 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index d52a31067f42..ed31c8422e7e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -34,6 +34,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { + private Driver() {} public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 1815412721f4..7c4be83a73e9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -154,14 +154,13 @@ public void initialize(InputSplit split, TaskAttemptContext context) WALSplit hsplit = (WALSplit)split; logFile = new Path(hsplit.getLogFileName()); conf = context.getConfiguration(); - LOG.info("Opening reader for "+split); + LOG.info("Opening {} for {}", logFile, split); openReader(logFile); this.startTime = hsplit.getStartTime(); this.endTime = hsplit.getEndTime(); } - private void openReader(Path path) throws IOException - { + private void openReader(Path path) throws IOException { closeReader(); reader = AbstractFSWALProvider.openReader(path, conf); seek(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index bbaa7549fa9a..5b1aac654414 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -58,6 +58,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; + + /** * A tool to replay WAL files as a M/R job. * The WAL can be replayed for a set of tables or all tables, @@ -140,7 +142,22 @@ public void setup(Context context) throws IOException { } /** - * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance. + * Enum for map metrics. Keep it out here rather than inside in the Map + * inner-class so we can find associated properties. + */ + protected static enum Counter { + /** Number of aggregated writes */ + PUTS, + /** Number of aggregated deletes */ + DELETES, + CELLS_READ, + CELLS_WRITTEN, + WALEDITS + } + + /** + * A mapper that writes out {@link Mutation} to be directly applied to + * a running HBase instance. 
*/ protected static class WALMapper extends Mapper { @@ -148,6 +165,7 @@ protected static class WALMapper @Override public void map(WALKey key, WALEdit value, Context context) throws IOException { + context.getCounter(Counter.WALEDITS).increment(1); try { if (tables.isEmpty() || tables.containsKey(key.getTableName())) { TableName targetTable = @@ -157,6 +175,7 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { Delete del = null; Cell lastCell = null; for (Cell cell : value.getCells()) { + context.getCounter(Counter.CELLS_READ).increment(1); // Filtering WAL meta marker entries. if (WALEdit.isMetaEditFamily(cell)) { continue; @@ -172,9 +191,11 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { // row or type changed, write out aggregate KVs. if (put != null) { context.write(tableOut, put); + context.getCounter(Counter.PUTS).increment(1); } if (del != null) { context.write(tableOut, del); + context.getCounter(Counter.DELETES).increment(1); } if (CellUtil.isDelete(cell)) { del = new Delete(CellUtil.cloneRow(cell)); @@ -187,14 +208,17 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { } else { put.add(cell); } + context.getCounter(Counter.CELLS_WRITTEN).increment(1); } lastCell = cell; } // write residual KVs if (put != null) { context.write(tableOut, put); + context.getCounter(Counter.PUTS).increment(1); } if (del != null) { + context.getCounter(Counter.DELETES).increment(1); context.write(tableOut, del); } } @@ -270,7 +294,7 @@ public Job createSubmittableJob(String[] args) throws IOException { setupTime(conf, WALInputFormat.START_TIME_KEY); setupTime(conf, WALInputFormat.END_TIME_KEY); String inputDirs = args[0]; - String[] tables = args[1].split(","); + String[] tables = args.length == 1? 
new String [] {}: args[1].split(",");
     String[] tableMap;
     if (args.length > 2) {
       tableMap = args[2].split(",");
@@ -278,7 +302,7 @@ public Job createSubmittableJob(String[] args) throws IOException {
         throw new IOException("The same number of tables and mapping must be provided.");
       }
     } else {
-      // if not mapping is specified map each table to itself
+      // if no mapping is specified, map each table to itself
       tableMap = tables;
     }
     conf.setStrings(TABLES_KEY, tables);
@@ -349,27 +373,27 @@ private void usage(final String errorMsg) {
     if (errorMsg != null && errorMsg.length() > 0) {
       System.err.println("ERROR: " + errorMsg);
     }
-    System.err.println("Usage: " + NAME + " [options] <wal inputdir> <tables> [<tableMappings>]");
-    System.err.println("Replay all WAL files into HBase.");
-    System.err.println("<tables> is a comma separated list of tables.");
-    System.err.println("If no tables (\"\") are specified, all tables are imported.");
-    System.err.println("(Be careful, hbase:meta entries will be imported in this case.)\n");
-    System.err.println("WAL entries can be mapped to new set of tables via <tableMappings>.");
-    System.err.println("<tableMappings> is a comma separated list of target tables.");
-    System.err.println("If specified, each table in <tables> must have a mapping.\n");
-    System.err.println("By default " + NAME + " will load data directly into HBase.");
-    System.err.println("To generate HFiles for a bulk data load instead, pass the following option:");
-    System.err.println("  -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
-    System.err.println("  (Only one table can be specified, and no mapping is allowed!)");
-    System.err.println("Time range options:");
-    System.err.println("  -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");
-    System.err.println("  -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");
-    System.err.println("  (The start and the end date of timerange. The dates can be expressed");
-    System.err.println("  in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format.");
-    System.err.println("  E.g. 1234567890120 or 2009-02-13T23:32:30.12)");
+    System.err.println("Usage: " + NAME + " [options] <WAL inputdir> [<tables> <tableMappings>]");
+    System.err.println(" <WAL inputdir> directory of WALs to replay.");
+    System.err.println(" <tables> comma separated list of tables. If no tables specified,");
+    System.err.println(" all are imported (even hbase:meta if present).");
+    System.err.println(" <tableMappings> WAL entries can be mapped to a new set of tables by passing");
+    System.err.println(" <tableMappings>, a comma separated list of target tables.");
+    System.err.println(" If specified, each table in <tables> must have a mapping.");
+    System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:");
+    System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
+    System.err.println(" Only one table can be specified, and no mapping allowed!");
+    System.err.println("To specify a time range, pass:");
+    System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");
+    System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");
+    System.err.println(" The start and the end date of timerange. The dates can be expressed");
+    System.err.println(" in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format.");
+    System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12");
     System.err.println("Other options:");
-    System.err.println("  -D" + JOB_NAME_CONF_KEY + "=jobName");
-    System.err.println("  Use the specified mapreduce job name for the wal player");
+    System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName");
+    System.err.println(" Use the specified mapreduce job name for the wal player");
+    System.err.println(" -Dwal.input.separator=' '");
+    System.err.println(" Change WAL filename separator (WAL dir names use default ','.)");
     System.err.println("For performance also consider the following options:\n"
       + " -Dmapreduce.map.speculative=false\n"
       + " -Dmapreduce.reduce.speculative=false");
@@ -387,7 +411,7 @@ public static void main(String[] args) throws Exception {

   @Override
   public int run(String[] args) throws Exception {
-    if (args.length < 2) {
+    if (args.length < 1) {
       usage("Wrong number of arguments: " + args.length);
       System.exit(-1);
     }
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
index 4880ab64e669..432aff1dd044 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
@@ -223,8 +223,8 @@ public void testMainMethod() throws Exception {
     } catch (SecurityException e) {
       assertEquals(-1, newSecurityManager.getExitCode());
       assertTrue(data.toString().contains("ERROR: Wrong number of arguments:"));
-      assertTrue(data.toString().contains("Usage: WALPlayer [options] <wal inputdir>" +
-        " <tables> [<tableMappings>]"));
+      assertTrue(data.toString().contains("Usage: WALPlayer [options] <WAL inputdir>" +
+        " [<tables> <tableMappings>]"));
       assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output"));
     }
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index d1bc2cf8e0cb..6ea23655d3c7 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -914,7 +914,7 @@ see <<_wal_tools>>.

 Invoke via:
----
-$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <wal inputdir> <tables> [<tableMappings>]>
+$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <WAL inputdir> [<tables> <tableMappings>]>
----

 For example:
@@ -932,29 +932,27 @@ To NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in

 Running `WALPlayer` with no arguments prints brief usage information:

----
-Usage: WALPlayer [options] <wal inputdir> <tables> [<tableMappings>]
-Replay all WAL files into HBase.
-<tables> is a comma separated list of tables.
-If no tables ("") are specified, all tables are imported.
-(Be careful, hbase:meta entries will be imported in this case.)
-
-WAL entries can be mapped to new set of tables via <tableMappings>.
-<tableMappings> is a comma separated list of target tables.
-If specified, each table in <tables> must have a mapping.
-
-By default WALPlayer will load data directly into HBase.
-To generate HFiles for a bulk data load instead, pass the following option:
  -Dwal.bulk.output=/path/for/output
  (Only one table can be specified, and no mapping is allowed!)
-Time range options:
  -Dwal.start.time=[date|ms]
  -Dwal.end.time=[date|ms]
  (The start and the end date of timerange. The dates can be expressed
  in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format.
  E.g. 1234567890120 or 2009-02-13T23:32:30.12)
+Usage: WALPlayer [options] <WAL inputdir> [<tables> <tableMappings>]
+ <WAL inputdir> directory of WALs to replay.
+ <tables> comma separated list of tables. If no tables specified,
+ all are imported (even hbase:meta if present).
+ <tableMappings> WAL entries can be mapped to a new set of tables by passing
+ <tableMappings>, a comma separated list of target tables.
+ If specified, each table in <tables> must have a mapping.
+To generate HFiles to bulk load instead of loading HBase directly, pass:
+ -Dwal.bulk.output=/path/for/output
+ Only one table can be specified, and no mapping allowed!
+To specify a time range, pass:
+ -Dwal.start.time=[date|ms]
+ -Dwal.end.time=[date|ms]
+ The start and the end date of timerange. The dates can be expressed
+ in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format.
+ E.g. 1234567890120 or 2009-02-13T23:32:30.12
 Other options:
- -Dmapreduce.job.name=jobName
- Use the specified mapreduce job name for the wal player
+ -Dmapreduce.job.name=jobName
+ Use the specified mapreduce job name for the wal player
+ -Dwal.input.separator=' '
+ Change WAL filename separator (WAL dir names use default ','.)
 For performance also consider the following options:
 -Dmapreduce.map.speculative=false
 -Dmapreduce.reduce.speculative=false

From ee02e673b77c343d2ba72774ee7b3a177572343e Mon Sep 17 00:00:00 2001
From: bsglz <18031031@qq.com>
Date: Thu, 1 Oct 2020 03:08:34 +0800
Subject: [PATCH 091/769] HBASE-25062 The link of "Re:(HBASE-451) Remove HTableDescriptor from HRegionInfo" invalid (#2455)

Signed-off-by: Jan Hentschel
Signed-off-by: Duo Zhang
Signed-off-by: stack
---
 src/main/asciidoc/_chapters/developer.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc
index 3e2e3938f459..a6939920cb45 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -2755,7 +2755,7 @@ However any substantive discussion (as with any off-list project-related discuss

 ==== Do not edit JIRA comments

-Misspellings and/or bad grammar is preferable to the disruption a JIRA comment edit causes: See the discussion at link:http://search-hadoop.com/?q=%5BReopened%5D+%28HBASE-451%29+Remove+HTableDescriptor+from+HRegionInfo&fc_project=HBase[Re:(HBASE-451) Remove HTableDescriptor from HRegionInfo]
+Misspellings and/or bad grammar is preferable to the disruption a JIRA comment edit causes.
[[thirdparty]] === The hbase-thirdparty dependency and shading/relocation From 1ce1cf66dd4a0761d3fdd1d1781f83373c633151 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 1 Oct 2020 14:30:27 +0800 Subject: [PATCH 092/769] HBASE-25132 Migrate flaky test jenkins job from Hadoop to hbase (#2485) --- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index e043feeb342d..282b83115883 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { From e663f098d8fd2e53d52117cbd518f0562ce6c61e Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 1 Oct 2020 14:32:49 +0800 Subject: [PATCH 093/769] HBASE-25133 Migrate HBase Nightly jenkins job from Hadoop to hbase (#2487) --- dev-support/Jenkinsfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 5aaefd80ff07..c250dcefe604 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { @@ -192,7 +192,7 @@ pipeline { stage ('yetus general check') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { @@ -257,7 +257,7 @@ pipeline { stage ('yetus jdk7 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -338,7 +338,7 @@ pipeline { stage ('yetus jdk8 hadoop2 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -419,7 +419,7 @@ pipeline { stage ('yetus jdk8 hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -504,7 +504,7 @@ pipeline { stage ('yetus jdk11 hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { From 5351aca8a1e01df4aefb0cfc1c0e1892dcd56caa Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 1 Oct 2020 14:33:02 +0800 Subject: [PATCH 094/769] HBASE-25134 Migrate HBase PreCommit jenkins job from Hadoop to hbase (#2488) --- dev-support/Jenkinsfile_GitHub | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index d314ba45cd9c..801c2771fb95 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -18,7 +18,7 @@ pipeline { agent { - label 'Hadoop' + label 'hbase' } options { @@ -66,7 +66,7 @@ pipeline { stage ('yetus general check') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { @@ -152,7 +152,7 @@ pipeline { stage ('yetus jdk8 Hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { @@ -252,7 +252,7 @@ pipeline { stage ('yetus jdk11 hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { From e9de7d9e7e03cc0c46ecb17eaee5b80a9fa7e2ef Mon Sep 17 00:00:00 2001 From: Sanjeet Nishad Date: Thu, 1 Oct 2020 13:24:31 +0530 Subject: [PATCH 095/769] HBASE-24981 Enable table replication fails from 1.x to 2.x if table already exist at peer Closes #2353 Signed-off-by: stack Signed-off-by: Viraj Jasani Signed-off-by: Pankaj Kumar --- .../client/ColumnFamilyDescriptorBuilder.java | 5 --- .../TestColumnFamilyDescriptorBuilder.java | 31 +++++++++++++++++++ 2 files changed, 31 insertions(+), 5 deletions(-) diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 3889d32dda54..9a47cb52fa95 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -294,11 +294,6 @@ public static Map<String, String> getDefaultValues() {
     DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
     DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
     DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
-    DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
-    DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
-    DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
-    DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
-    DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
     // Do NOT add this key/value by default. NEW_VERSION_BEHAVIOR is NOT defined in hbase1 so
     // it is not possible to make an hbase1 HCD the same as an hbase2 HCD and so the replication
     // compare of schemas will fail. It is OK not adding the below to the initial map because of
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java
index d6ea1b3cef8f..557d2f8dfb6e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java
@@ -39,6 +39,7 @@
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import java.util.Map;

 @Category({ MiscTests.class, SmallTests.class })
 public class TestColumnFamilyDescriptorBuilder {
@@ -181,4 +182,34 @@ public void testSetTimeToLive() throws HBaseException {
     builder.setTimeToLive(ttl);
     Assert.assertEquals(43282800, builder.build().getTimeToLive());
   }
+
+  /**
+   * Test for verifying the ColumnFamilyDescriptorBuilder's default values so that backward
+   * compatibility with hbase-1.x can be maintained (see HBASE-24981).
+   */
+  @Test
+  public void testDefaultBuilder() {
+    final Map<String, String> defaultValueMap = ColumnFamilyDescriptorBuilder.getDefaultValues();
+    assertEquals(defaultValueMap.size(), 11);
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOOMFILTER),
+      BloomType.ROW.toString());
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE), "0");
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.MAX_VERSIONS), "1");
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.MIN_VERSIONS), "0");
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.COMPRESSION),
+      Compression.Algorithm.NONE.toString());
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.TTL),
+      Integer.toString(Integer.MAX_VALUE));
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOCKSIZE),
+      Integer.toString(64 * 1024));
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.IN_MEMORY),
+      Boolean.toString(false));
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOCKCACHE),
+      Boolean.toString(true));
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS),
+      KeepDeletedCells.FALSE.toString());
+    assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING),
+      DataBlockEncoding.NONE.toString());
+
+  }
 }

From a0b3d6276fe5aec577253aaafd31b73b9e3aaa19 Mon Sep 17 00:00:00 2001
From: ramkrish86
Date: Thu, 1 Oct 2020 18:07:28 +0530
Subject: [PATCH 096/769] HBASE-25135 Convert the internal separator while emitting the memstore read metrics to # (#2486)

Signed-off-by: Anoop Sam John
---
 .../hadoop/hbase/regionserver/MetricsTableSourceImpl.java    | 2 +-
 .../hbase/regionserver/MetricsTableWrapperAggregate.java     | 2 +-
 .../hadoop/hbase/regionserver/MetricsTableWrapperStub.java   | 4 ++--
 .../hbase/regionserver/MetricsTableWrapperAggregateImpl.java | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java
index b39e1444dd2f..85f5bded98a8 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java
@@ -337,7 +337,7 @@ private void addGauge(MetricsRecordBuilder mrb, Map<String, Long> metricMap, Str
     for (Entry<String, Long> entry : metricMap.entrySet()) {
       // append 'store' and its name to the metric
       mrb.addGauge(Interns.info(this.tableNamePrefixPart1 + _COLUMNFAMILY
-        + entry.getKey().split(MetricsTableWrapperAggregate.UNDERSCORE)[1]
+        + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1]
         + this.tableNamePrefixPart2 + metricName,
         metricDesc), entry.getValue());
     }
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java
index 4b8c46af2c0f..40fd6d8effaf 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java
@@ -28,7 +28,7 @@
  */
 @InterfaceAudience.Private
 public interface MetricsTableWrapperAggregate {
-  public String UNDERSCORE = "_";
+  public String HASH = "#";
   /**
   * Get the number of read requests that have
been issued against this table */ diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index 9a860a041d45..dbdc92da8ac4 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -116,14 +116,14 @@ public long getCpRequestsCount(String table) { @Override public Map getMemstoreOnlyRowReadsCount(String table) { Map map = new HashMap(); - map.put("table_info", 3L); + map.put("table#info", 3L); return map; } @Override public Map getMixedRowReadsCount(String table) { Map map = new HashMap(); - map.put("table_info", 3L); + map.put("table#info", 3L); return map; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java index 7b5c6ef9701d..77130b8da4cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java @@ -94,7 +94,7 @@ public void run() { (long) store.getAvgStoreFileAge().getAsDouble() * store.getStorefilesCount(); } mt.storeCount += 1; - tempKey = tbl.getNameAsString() + UNDERSCORE + familyName; + tempKey = tbl.getNameAsString() + HASH + familyName; Long tempVal = mt.perStoreMemstoreOnlyReadCount.get(tempKey); if (tempVal == null) { tempVal = 0L; From 11a336a74aa0f5cac3d96d2b54e22e31c3978081 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 1 Oct 2020 22:15:15 +0800 Subject: [PATCH 097/769] Revert "HBASE-25134 Migrate HBase PreCommit jenkins job from Hadoop to hbase (#2488)" This reverts commit 5351aca8a1e01df4aefb0cfc1c0e1892dcd56caa. --- dev-support/Jenkinsfile_GitHub | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index 801c2771fb95..d314ba45cd9c 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -18,7 +18,7 @@ pipeline { agent { - label 'hbase' + label 'Hadoop' } options { @@ -66,7 +66,7 @@ pipeline { stage ('yetus general check') { agent { node { - label 'hbase' + label 'Hadoop' } } environment { @@ -152,7 +152,7 @@ pipeline { stage ('yetus jdk8 Hadoop3 checks') { agent { node { - label 'hbase' + label 'Hadoop' } } environment { @@ -252,7 +252,7 @@ pipeline { stage ('yetus jdk11 hadoop3 checks') { agent { node { - label 'hbase' + label 'Hadoop' } } environment { From 3b91a15183482a243a443509dfc7385fac856beb Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Thu, 1 Oct 2020 10:04:58 -0700 Subject: [PATCH 098/769] HBASE-25091 Move LogComparator from ReplicationSource to AbstractFSWALProvider#.WALsStartTimeComparator (#2449) Give the comparator a more descriptive name, a better location, and make it work even when passed hbase:meta WAL files. 
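The start-time parsing this message refers to can be pictured with a standalone sketch. It is illustrative only, mirroring the idea rather than the committed comparator, and it assumes the last all-digit dot-separated token of a WAL file name is its start time, with a '.meta' or '.syncrep' suffix shifting that token one position left.

public class WalStartTimeSketch {
  static long startTime(String walName) {
    String[] parts = walName.split("\\.");
    String ts = parts[parts.length - 1];
    if (!ts.chars().allMatch(Character::isDigit)) {
      ts = parts[parts.length - 2]; // skip the '.meta' or '.syncrep' suffix
    }
    return Long.parseLong(ts);
  }

  public static void main(String[] args) {
    // A plain WAL name and a meta WAL name compare by their embedded timestamps.
    System.out.println(startTime("localhost%2C59908%2C1600304600425.1600304604321"));
    System.out.println(startTime("localhost%2C59908%2C1600304600425.meta.1600304604319.meta"));
  }
}

The example names are the same ones exercised by the new TestWALProvider further down in this patch.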
Signed-off-by: Duo Zhang Signed-off-by: Guanghao Zhang --- .../RecoveredReplicationSource.java | 4 +- .../regionserver/ReplicationSource.java | 29 +-------- .../hbase/wal/AbstractFSWALProvider.java | 36 +++++++++-- .../hadoop/hbase/wal/TestWALProvider.java | 62 +++++++++++++++++++ 4 files changed, 98 insertions(+), 33 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index 00aa026093fa..46cf851b9723 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -64,8 +64,8 @@ protected RecoveredReplicationSourceShipper createNewShipper(String walGroupId, public void locateRecoveredPaths(PriorityBlockingQueue queue) throws IOException { boolean hasPathChanged = false; - PriorityBlockingQueue newPaths = - new PriorityBlockingQueue(queueSizePerGroup, new LogsComparator()); + PriorityBlockingQueue newPaths = new PriorityBlockingQueue(queueSizePerGroup, + new AbstractFSWALProvider.WALStartTimeComparator()); pathsLoop: for (Path path : queue) { if (fs.exists(path)) { // still in same location, don't need to do anything newPaths.add(path); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 82120736bd42..4b034f56a93c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -253,7 +252,8 @@ public void enqueueLog(Path wal) { String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getName()); PriorityBlockingQueue queue = queues.get(walPrefix); if (queue == null) { - queue = new PriorityBlockingQueue<>(queueSizePerGroup, new LogsComparator()); + queue = new PriorityBlockingQueue<>(queueSizePerGroup, + new AbstractFSWALProvider.WALStartTimeComparator()); // make sure that we do not use an empty queue when setting up a ReplicationSource, otherwise // the shipper may quit immediately queue.put(wal); @@ -759,31 +759,6 @@ public boolean isSourceActive() { return !this.server.isStopped() && this.sourceRunning; } - /** - * Comparator used to compare logs together based on their start time - */ - public static class LogsComparator implements Comparator { - - @Override - public int compare(Path o1, Path o2) { - return Long.compare(getTS(o1), getTS(o2)); - } - - /** - *
- * Split a path to get the start time
- * For example: 10.20.20.171%3A60020.1277499063250
- * @param p path to split - * @return start time - */ - private static long getTS(Path p) { - return AbstractFSWALProvider.getWALStartTimeFromWALName(p.getName()); - } - } - public ReplicationQueueInfo getReplicationQueueInfo() { return replicationQueueInfo; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 94ae70467793..6f9c87b00518 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hbase.wal; +import static org.apache.commons.lang3.StringUtils.isNumeric; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; @@ -418,6 +420,36 @@ public static boolean isMetaFile(String p) { return p != null && p.endsWith(META_WAL_PROVIDER_ID); } + /** + * Comparator used to compare WAL files together based on their start time. + * Just compares start times and nothing else. + */ + public static class WALStartTimeComparator implements Comparator { + @Override + public int compare(Path o1, Path o2) { + return Long.compare(getTS(o1), getTS(o2)); + } + + /** + * Split a path to get the start time + * For example: 10.20.20.171%3A60020.1277499063250 + * Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL + * which adds a '.syncrep' suffix. Check. + * @param p path to split + * @return start time + */ + private static long getTS(Path p) { + String name = p.getName(); + String [] splits = name.split("\\."); + String ts = splits[splits.length - 1]; + if (!isNumeric(ts)) { + // Its a '.meta' or a '.syncrep' suffix. + ts = splits[splits.length - 2]; + } + return Long.parseLong(ts); + } + } + public static boolean isArchivedLogFile(Path p) { String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; return p.toString().contains(oldLog); @@ -545,8 +577,4 @@ private static String getWALNameGroupFromWALName(String name, int group) { public static String getWALPrefixFromWALName(String name) { return getWALNameGroupFromWALName(name, 1); } - - public static long getWALStartTimeFromWALName(String name) { - return Long.parseLong(getWALNameGroupFromWALName(name, 2)); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java new file mode 100644 index 000000000000..bc06147d7cca --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.wal; + +import static org.junit.Assert.assertTrue; +import java.io.IOException; +import java.util.Comparator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RegionServerTests.class, SmallTests.class}) +public class TestWALProvider { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWALProvider.class); + + /** + * Test start time comparator. + */ + @Test + public void testWALStartTimeComparator() throws IOException { + Path metaPath1 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604319.meta"); + Path metaPath2 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604320.meta"); + Path path3 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.1600304604321"); + Path metaPath4 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604321.meta"); + Comparator c = new AbstractFSWALProvider.WALStartTimeComparator(); + assertTrue(c.compare(metaPath1, metaPath1) == 0); + assertTrue(c.compare(metaPath2, metaPath2) == 0); + assertTrue(c.compare(metaPath1, metaPath2) < 0); + assertTrue(c.compare(metaPath2, metaPath1) > 0); + assertTrue(c.compare(metaPath2, path3) < 0); + assertTrue(c.compare(path3, metaPath4) == 0); + } +} From 5e5166dc40a7e29de7a1c139f170b90903b191ce Mon Sep 17 00:00:00 2001 From: SteNicholas Date: Fri, 2 Oct 2020 21:27:01 +0800 Subject: [PATCH 099/769] HBASE-25120 Remove the deprecated annotation for MetaTableAccessor.getScanForTableName (#2493) Signed-off-by: Duo Zhang --- .../main/java/org/apache/hadoop/hbase/MetaTableAccessor.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index c7f7ec197a9e..b9ec944ee9e3 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -319,9 +319,7 @@ public static List getTableRegions(Connection connection, TableName * and scan until it hits a new table since that requires parsing the HRI to get the table name. * @param tableName bytes of table's name * @return configured Scan object - * @deprecated This is internal so please remove it when we get a chance. 
*/ - @Deprecated public static Scan getScanForTableName(Connection connection, TableName tableName) { // Start key is just the table name with delimiters byte[] startKey = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION); From b0170d0e240f38fb6cedb5e8a34ff08eb9364a6f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 3 Oct 2020 20:48:38 +0800 Subject: [PATCH 100/769] HBASE-25080 Should not use AssignmentManager to test whether a table is enabled or not (#2436) Signed-off-by: Guanghao Zhang Signed-off-by: stack Signed-off-by: Viraj Jasani --- .../master/assignment/AssignmentManager.java | 6 +++--- .../AbstractStateMachineTableProcedure.java | 5 +++++ .../master/procedure/ModifyTableProcedure.java | 2 +- .../procedure/ReopenTableRegionsProcedure.java | 3 +-- .../main/resources/hbase-webapps/master/table.jsp | 5 +++-- .../master/assignment/MockMasterServices.java | 15 ++++----------- 6 files changed, 17 insertions(+), 19 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 5638af5af48f..d2e1eb3e9d42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -386,15 +386,15 @@ public List getFavoredNodes(final RegionInfo regionInfo) { // ============================================================================================ // Table State Manager helpers // ============================================================================================ - TableStateManager getTableStateManager() { + private TableStateManager getTableStateManager() { return master.getTableStateManager(); } - public boolean isTableEnabled(final TableName tableName) { + private boolean isTableEnabled(final TableName tableName) { return getTableStateManager().isTableState(tableName, TableState.State.ENABLED); } - public boolean isTableDisabled(final TableName tableName) { + private boolean isTableDisabled(final TableName tableName) { return getTableStateManager().isTableState(tableName, TableState.State.DISABLED, TableState.State.DISABLING); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java index 1edfc74179ae..9b1dfc6a23a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java @@ -176,6 +176,11 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H } } + protected boolean isTableEnabled(MasterProcedureEnv env) { + return env.getMasterServices().getTableStateManager().isTableState(getTableName(), + TableState.State.ENABLED); + } + /** * Check region is online. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 64f4bf6c84d9..892ef28ef23f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -149,7 +149,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS); break; case MODIFY_TABLE_REOPEN_ALL_REGIONS: - if (env.getAssignmentManager().isTableEnabled(getTableName())) { + if (isTableEnabled(env)) { addChildProcedure(new ReopenTableRegionsProcedure(getTableName())); } if (deleteColumnFamilyInModify) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java index 7bf834c62c8c..ffa485d5465c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java @@ -22,7 +22,6 @@ import java.util.Collections; import java.util.List; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.assignment.RegionStateNode; @@ -105,7 +104,7 @@ protected Flow executeFromState(MasterProcedureEnv env, ReopenTableRegionsState throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { switch (state) { case REOPEN_TABLE_REGIONS_GET_REGIONS: - if (!env.getAssignmentManager().isTableEnabled(tableName)) { + if (!isTableEnabled(env)) { LOG.info("Table {} is disabled, give up reopening its regions", tableName); return Flow.NO_MORE_STATE; } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 23eeb3ab740f..25b5979ae8c8 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -53,6 +53,7 @@ import="org.apache.hadoop.hbase.client.RegionLocator" import="org.apache.hadoop.hbase.client.RegionReplicaUtil" import="org.apache.hadoop.hbase.client.Table" + import="org.apache.hadoop.hbase.client.TableState" import="org.apache.hadoop.hbase.client.ColumnFamilyDescriptor" import="org.apache.hadoop.hbase.http.InfoServer" import="org.apache.hadoop.hbase.master.HMaster" @@ -647,14 +648,14 @@ Enabled - <%= master.getAssignmentManager().isTableEnabled(table.getName()) %> + <%= master.getTableStateManager().isTableState(table.getName(), TableState.State.ENABLED) %> Is the table enabled Compaction <% - if (master.getAssignmentManager().isTableEnabled(table.getName())) { + if (master.getTableStateManager().isTableState(table.getName(), TableState.State.ENABLED)) { CompactionState compactionState = master.getCompactionState(table.getName()); %><%= compactionState==null?"UNKNOWN":compactionState %><% } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java index 5e78c3dc7758..e899cee3ea47 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; import static org.mockito.ArgumentMatchers.any; + import java.io.IOException; import java.util.List; import java.util.Map; @@ -61,7 +62,9 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; @@ -105,17 +108,7 @@ public MockMasterServices(Configuration conf, null: new SplitWALManager(this); // Mock an AM. - this.assignmentManager = new AssignmentManager(this, new MockRegionStateStore(this)) { - @Override - public boolean isTableEnabled(final TableName tableName) { - return true; - } - - @Override - public boolean isTableDisabled(final TableName tableName) { - return false; - } - }; + this.assignmentManager = new AssignmentManager(this, new MockRegionStateStore(this)); this.balancer = LoadBalancerFactory.getLoadBalancer(conf); this.serverManager = new ServerManager(this); this.tableStateManager = Mockito.mock(TableStateManager.class); From 3226c1795acc9323b9cc08da48e645dc16127b37 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Sun, 4 Oct 2020 16:02:12 +0530 Subject: [PATCH 101/769] HBASE-25115 HFilePrettyPrinter can't seek to the row which is the first row of a hfile Closes #2473 Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani --- .../hbase/io/hfile/HFilePrettyPrinter.java | 10 +++++----- .../io/hfile/TestHFilePrettyPrinter.java | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 93d85af677b8..02efa8e89863 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -322,16 +322,16 @@ public int processFile(Path file, boolean checkRootDir) throws IOException { // scan over file and read key/value's and check if requested HFileScanner scanner = reader.getScanner(false, false, false); fileStats = new KeyValueStatsCollector(); - boolean shouldScanKeysValues = false; - if (this.isSeekToRow) { + boolean shouldScanKeysValues; + if (this.isSeekToRow && !Bytes.equals(row, reader.getFirstRowKey().orElse(null))) { // seek to the first kv on this row - shouldScanKeysValues = - (scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != -1); + shouldScanKeysValues = (scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != -1); } else { shouldScanKeysValues = scanner.seekTo(); } - if (shouldScanKeysValues) + if (shouldScanKeysValues) { scanKeysValues(file, fileStats, scanner, row); + } } // print meta data diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java index 
8fab5a3df8d2..c7ac97aa94f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java @@ -108,4 +108,23 @@ public void testHFilePrettyPrinterRootDir() throws Exception { String expectedResult = "Scanning -> " + fileInRootDir + "\n" + "Scanned kv count -> 1000\n"; assertEquals(expectedResult, result); } + + @Test + public void testHFilePrettyPrinterSeekFirstRow() throws Exception { + Path fileNotInRootDir = UTIL.getDataTestDir("hfile"); + TestHRegionServerBulkLoad.createHFile(fs, fileNotInRootDir, cf, fam, value, 1000); + assertNotEquals("directory used is not an HBase root dir", UTIL.getDefaultRootDirPath(), + fileNotInRootDir); + + HFile.Reader reader = + HFile.createReader(fs, fileNotInRootDir, CacheConfig.DISABLED, true, conf); + String firstRowKey = new String(reader.getFirstRowKey().get()); + + System.setOut(ps); + new HFilePrettyPrinter(conf) + .run(new String[] { "-v", "-w" + firstRowKey, String.valueOf(fileNotInRootDir) }); + String result = new String(stream.toByteArray()); + String expectedResult = "Scanning -> " + fileNotInRootDir + "\n" + "Scanned kv count -> 1\n"; + assertEquals(expectedResult, result); + } } From a8096b3ac39b2872ed17e4359255ac42516525ac Mon Sep 17 00:00:00 2001 From: Joseph295 <517536891@qq.com> Date: Mon, 5 Oct 2020 12:39:27 +0800 Subject: [PATCH 102/769] HBASE-25048 [HBCK2] Bypassed parent procedures are not updated in store (#2410) Signed-off-by: Duo Zhang Signed-off-by: stack --- .../org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index f8857859131a..b99f544628bb 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -981,7 +981,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur while (current != null) { LOG.debug("Bypassing {}", current); current.bypass(getEnvironment()); - store.update(procedure); + store.update(current); long parentID = current.getParentProcId(); current = getProcedure(parentID); } From 23ce91819aec35aec28116316d4bd5c37889591f Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 5 Oct 2020 15:37:34 +0530 Subject: [PATCH 103/769] HBASE-25147 : Serialize regionNames in ReopenTableRegionsProcedure Closes #2494 Signed-off-by: Duo Zhang --- .../server/master/MasterProcedure.proto | 1 + .../ReopenTableRegionsProcedure.java | 24 ++++++++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index b18de27a0c9a..59a1d68fda44 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -487,6 +487,7 @@ enum ReopenTableRegionsState { message ReopenTableRegionsStateData { required TableName table_name = 1; repeated RegionLocation region = 2; + repeated bytes region_names = 3; } enum InitMetaState { diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java index ffa485d5465c..aa89094501db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java @@ -36,6 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -56,19 +57,19 @@ public class ReopenTableRegionsProcedure // Specify specific regions of a table to reopen. // if specified null, all regions of the table will be reopened. - private final List<byte[]> regionNames; + private List<byte[]> regionNames; private List<HRegionLocation> regions = Collections.emptyList(); private RetryCounter retryCounter; public ReopenTableRegionsProcedure() { - regionNames = null; + regionNames = Collections.emptyList(); } public ReopenTableRegionsProcedure(TableName tableName) { this.tableName = tableName; - this.regionNames = null; + this.regionNames = Collections.emptyList(); } public ReopenTableRegionsProcedure(final TableName tableName, @@ -223,6 +224,17 @@ protected void serializeStateData(ProcedureStateSerializer serializer) throws IO ReopenTableRegionsStateData.Builder builder = ReopenTableRegionsStateData.newBuilder() .setTableName(ProtobufUtil.toProtoTableName(tableName)); regions.stream().map(ProtobufUtil::toRegionLocation).forEachOrdered(builder::addRegion); + if (CollectionUtils.isNotEmpty(regionNames)) { + // As of this writing, wrapping this statement within an if condition is only required + // for backward compatibility, as we used to have 'regionNames' as null for cases + // where all regions of the given table should be reopened. Now, we have kept emptyList() + // for 'regionNames' to indicate all regions of the given table should be reopened, unless + // 'regionNames' contains at least one specific region, in which case only the regions + // that 'regionNames' contains should be reopened, not all regions of the given table. + // Since we no longer deal with a null 'regionNames', this check is not strictly needed, + // and the guard can be removed in HBase 4.0.0.
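The comment above boils down to one convention: an empty 'regionNames' list now plays the role the old null value used to play and means "reopen every region of the table", while a non-empty list restricts the reopen to exactly the named regions. A minimal, self-contained sketch of that convention (illustrative names only, not the HBase API):

import java.util.Collections;
import java.util.List;

// Hypothetical model of the "empty list means all regions" convention described
// above; region names are simplified to Strings for readability.
public class RegionNamesConventionSketch {

  static List<String> regionsToReopen(List<String> regionNames, List<String> allRegions) {
    // An empty 'regionNames' selects everything, matching the old null semantics.
    return regionNames.isEmpty() ? allRegions : regionNames;
  }

  public static void main(String[] args) {
    List<String> all = List.of("region-a", "region-b", "region-c");
    System.out.println(regionsToReopen(Collections.emptyList(), all)); // all three regions
    System.out.println(regionsToReopen(List.of("region-b"), all));     // only region-b
  }
}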
+ regionNames.stream().map(ByteString::copyFrom).forEachOrdered(builder::addRegionNames); + } serializer.serialize(builder.build()); } @@ -233,5 +245,11 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws tableName = ProtobufUtil.toTableName(data.getTableName()); regions = data.getRegionList().stream().map(ProtobufUtil::toRegionLocation) .collect(Collectors.toList()); + if (CollectionUtils.isNotEmpty(data.getRegionNamesList())) { + regionNames = data.getRegionNamesList().stream().map(ByteString::toByteArray) + .collect(Collectors.toList()); + } else { + regionNames = Collections.emptyList(); + } } } From 9ba90e16793a120f211e2eb4403007926cf2354e Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 5 Oct 2020 21:29:55 +0800 Subject: [PATCH 104/769] HBASE-25121 Refactor MetaTableAccessor.addRegionsToMeta and its usage places (#2476) Signed-off-by: stack --- .../hadoop/hbase/MetaTableAccessor.java | 87 +++++-------------- .../hadoop/hbase/CatalogFamilyFormat.java | 3 +- .../master/assignment/RegionStateStore.java | 79 +++++++++++++++++ .../hbase/master/janitor/MetaFixer.java | 15 ++-- .../procedure/CreateTableProcedure.java | 15 ++-- .../procedure/DeleteTableProcedure.java | 32 ++++--- .../procedure/ModifyTableProcedure.java | 69 +++------------ .../hbase/util/ServerRegionReplicaUtil.java | 43 ++++----- .../hadoop/hbase/HBaseTestingUtility.java | 2 +- .../hadoop/hbase/TestMetaTableAccessor.java | 58 ++----------- .../hadoop/hbase/client/TestEnableTable.java | 6 +- .../assignment/TestAssignmentManager.java | 4 +- .../assignment/TestRegionStateStore.java | 37 ++++++++ 13 files changed, 217 insertions(+), 233 deletions(-) diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index b9ec944ee9e3..7ec2a22e7833 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell.Type; import org.apache.hadoop.hbase.ClientMetaTableAccessor.QueryType; import org.apache.hadoop.hbase.client.Connection; @@ -247,7 +247,7 @@ public static Result scanByRegionEncodedName(Connection connection, String regio throws IOException { RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName)); - Scan scan = getMetaScan(connection, 1); + Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setFilter(rowFilter); try (Table table = getMetaHTable(connection); ResultScanner resultScanner = table.getScanner(scan)) { @@ -320,24 +320,23 @@ public static List getTableRegions(Connection connection, TableName * @param tableName bytes of table's name * @return configured Scan object */ - public static Scan getScanForTableName(Connection connection, TableName tableName) { + public static Scan getScanForTableName(Configuration conf, TableName tableName) { // Start key is just the table name with delimiters byte[] startKey = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION); // Stop key appends the smallest possible char to the table name byte[] stopKey = ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION); - Scan scan = getMetaScan(connection, -1); + Scan scan = getMetaScan(conf, -1); 
scan.withStartRow(startKey); scan.withStopRow(stopKey); return scan; } - private static Scan getMetaScan(Connection connection, int rowUpperLimit) { + private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { Scan scan = new Scan(); - int scannerCaching = connection.getConfiguration().getInt(HConstants.HBASE_META_SCANNER_CACHING, + int scannerCaching = conf.getInt(HConstants.HBASE_META_SCANNER_CACHING, HConstants.DEFAULT_HBASE_META_SCANNER_CACHING); - if (connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, - HConstants.DEFAULT_USE_META_REPLICAS)) { + if (conf.getBoolean(HConstants.USE_META_REPLICAS, HConstants.DEFAULT_USE_META_REPLICAS)) { scan.setConsistency(Consistency.TIMELINE); } if (rowUpperLimit > 0) { @@ -469,7 +468,7 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, final ClientMetaTableAccessor.Visitor visitor) throws IOException { int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE; - Scan scan = getMetaScan(connection, rowUpperLimit); + Scan scan = getMetaScan(connection.getConfiguration(), rowUpperLimit); for (byte[] family : type.getFamilies()) { scan.addFamily(family); @@ -525,7 +524,7 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR private static RegionInfo getClosestRegionInfo(Connection connection, @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException { byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); - Scan scan = getMetaScan(connection, 1); + Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setReversed(true); scan.withStartRow(searchRow); try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) { @@ -733,37 +732,6 @@ private static void deleteFromMetaTable(final Connection connection, final List< } } - /** - * Deletes some replica columns corresponding to replicas for the passed rows - * @param metaRows rows in hbase:meta - * @param replicaIndexToDeleteFrom the replica ID we would start deleting from - * @param numReplicasToRemove how many replicas to remove - * @param connection connection we're using to access meta table - */ - public static void removeRegionReplicasFromMeta(Set metaRows, - int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection) - throws IOException { - int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove; - for (byte[] row : metaRows) { - long now = EnvironmentEdgeManager.currentTime(); - Delete deleteReplicaLocations = new Delete(row); - for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) { - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getSeqNumColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getStartCodeColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerNameColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getRegionStateColumn(i), now); - } - - deleteFromMetaTable(connection, deleteReplicaLocations); - } - } - public static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) 
.setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.STATE_QUALIFIER) @@ -804,22 +772,6 @@ public static void addSplitsToParent(Connection connection, RegionInfo regionInf } } - /** - * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this - * does not add its daughter's as different rows, but adds information about the daughters in the - * same row as the parent. Use - * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} if - * you want to do that. - * @param connection connection we're using - * @param regionInfo region information - * @throws IOException if problem connecting or updating meta - */ - @VisibleForTesting - public static void addRegionToMeta(Connection connection, RegionInfo regionInfo) - throws IOException { - addRegionsToMeta(connection, Collections.singletonList(regionInfo), 1); - } - /** * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is * CLOSED. @@ -845,17 +797,18 @@ public static void addRegionsToMeta(Connection connection, List regi int regionReplication, long ts) throws IOException { List puts = new ArrayList<>(); for (RegionInfo regionInfo : regionInfos) { - if (RegionReplicaUtil.isDefaultReplica(regionInfo)) { - Put put = makePutFromRegionInfo(regionInfo, ts); - // New regions are added with initial state of CLOSED. - addRegionStateToPut(put, RegionState.State.CLOSED); - // Add empty locations for region replicas so that number of replicas can be cached - // whenever the primary region is looked up from meta - for (int i = 1; i < regionReplication; i++) { - addEmptyLocation(put, i); - } - puts.add(put); + if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) { + continue; + } + Put put = makePutFromRegionInfo(regionInfo, ts); + // New regions are added with initial state of CLOSED. + addRegionStateToPut(put, RegionState.State.CLOSED); + // Add empty locations for region replicas so that number of replicas can be cached + // whenever the primary region is looked up from meta + for (int i = 1; i < regionReplication; i++) { + addEmptyLocation(put, i); } + puts.add(put); } putsToMetaTable(connection, puts); LOG.info("Added {} regions to meta.", puts.size()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java index d0ee3dc83326..16337072aa7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java @@ -311,8 +311,7 @@ public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) { * @param replicaId the replicaId of the region * @return a byte[] for state qualifier */ - @VisibleForTesting - static byte[] getRegionStateColumn(int replicaId) { + public static byte[] getRegionStateColumn(int replicaId) { return replicaId == 0 ? 
HConstants.STATE_QUALIFIER : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 935f61abd2f1..500e5ec79da0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -545,6 +547,83 @@ public void overwriteRegions(List regionInfos, int regionReplication LOG.debug("Overwritten regions: {} ", regionInfos); } + /** + * Update region replicas if necessary by adding new replica locations or removing unused region + * replicas + */ + public void updateRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) + throws IOException { + if (newReplicaCount < oldReplicaCount) { + removeRegionReplicas(tableName, oldReplicaCount, newReplicaCount); + } else if (newReplicaCount > oldReplicaCount) { + addRegionReplicas(tableName, oldReplicaCount, newReplicaCount); + } + } + + private Scan getScanForUpdateRegionReplicas(TableName tableName) { + return MetaTableAccessor.getScanForTableName(master.getConfiguration(), tableName) + .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + } + + private void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) + throws IOException { + Scan scan = getScanForUpdateRegionReplicas(tableName); + List deletes = new ArrayList<>(); + long now = EnvironmentEdgeManager.currentTime(); + try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; + } + RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); + if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { + continue; + } + Delete delete = new Delete(result.getRow()); + for (int i = newReplicaCount; i < oldReplicaCount; i++) { + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getRegionStateColumn(i), + now); + } + deletes.add(delete); + } + debugLogMutations(deletes); + metaTable.delete(deletes); + } + } + + private void addRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) + throws IOException { + Scan scan = getScanForUpdateRegionReplicas(tableName); + List puts = new ArrayList<>(); + long now = EnvironmentEdgeManager.currentTime(); + try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { + for (;;) { + 
Result result = scanner.next(); + if (result == null) { + break; + } + RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); + if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { + continue; + } + Put put = new Put(result.getRow(), now); + for (int i = oldReplicaCount; i < newReplicaCount; i++) { + MetaTableAccessor.addEmptyLocation(put, i); + } + puts.add(put); + } + debugLogMutations(puts); + metaTable.put(puts); + } + } + // ========================================================================== // Table Descriptors helpers // ========================================================================== diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java index 6f5162775da1..f9dc1ccb5aaf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java @@ -29,6 +29,7 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @@ -190,8 +192,8 @@ private static List createMetaEntries(final MasterServices masterSer // Add replicas if needed // we need to create regions with replicaIds starting from 1 - List newRegions = RegionReplicaUtil.addReplicas( - Collections.singletonList(regionInfo), 1, td.getRegionReplication()); + List newRegions = RegionReplicaUtil + .addReplicas(Collections.singletonList(regionInfo), 1, td.getRegionReplication()); // Add regions to META MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(), newRegions, @@ -199,12 +201,13 @@ private static List createMetaEntries(final MasterServices masterSer // Setup replication for region replicas if needed if (td.getRegionReplication() > 1) { - ServerRegionReplicaUtil.setupRegionReplicaReplication( - masterServices.getConfiguration()); + ServerRegionReplicaUtil.setupRegionReplicaReplication(masterServices); } - return Either., IOException>ofLeft(newRegions); + return Either., IOException> ofLeft(newRegions); } catch (IOException e) { - return Either., IOException>ofRight(e); + return Either., IOException> ofRight(e); + } catch (ReplicationException e) { + return Either., IOException> ofRight(new HBaseIOException(e)); } }) .collect(Collectors.toList()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index e7162d9b3add..3f171ee694d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableExistsException; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -363,23 +365,26 @@ protected static void moveTempDirectoryToHBaseRoot( } protected static List addTableToMeta(final MasterProcedureEnv env, - final TableDescriptor tableDescriptor, - final List regions) throws IOException { + final TableDescriptor tableDescriptor, final List regions) throws IOException { assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions; ProcedureSyncWait.waitMetaRegions(env); // Add replicas if needed // we need to create regions with replicaIds starting from 1 - List newRegions = RegionReplicaUtil.addReplicas(regions, 1, - tableDescriptor.getRegionReplication()); + List newRegions = + RegionReplicaUtil.addReplicas(regions, 1, tableDescriptor.getRegionReplication()); // Add regions to META addRegionsToMeta(env, tableDescriptor, newRegions); // Setup replication for region replicas if needed if (tableDescriptor.getRegionReplication() > 1) { - ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); + try { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices()); + } catch (ReplicationException e) { + throw new HBaseIOException(e); + } } return newRegions; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 5b118a4f37c5..9cfce0ce3632 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -40,12 +39,14 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.favored.FavoredNodesManager; +import org.apache.hadoop.hbase.filter.KeyOnlyFilter; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -357,22 +358,29 @@ protected static void deleteFromFs(final MasterProcedureEnv env, /** * There may be items for 
this table still up in hbase:meta in the case where the info:regioninfo * column was empty because of some write error. Remove ALL rows from hbase:meta that have to do - * with this table. See HBASE-12980. + * with this table. + * <p/>
+ * See HBASE-12980. */ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final TableName tableName) - throws IOException { - Connection connection = env.getMasterServices().getConnection(); - Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName); - try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { - List deletes = new ArrayList<>(); - try (ResultScanner resScanner = metaTable.getScanner(tableScan)) { - for (Result result : resScanner) { - deletes.add(new Delete(result.getRow())); + throws IOException { + Scan tableScan = MetaTableAccessor.getScanForTableName(env.getMasterConfiguration(), tableName) + .setFilter(new KeyOnlyFilter()); + long now = EnvironmentEdgeManager.currentTime(); + List deletes = new ArrayList<>(); + try ( + Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME); + ResultScanner scanner = metaTable.getScanner(tableScan)) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; } + deletes.add(new Delete(result.getRow(), now)); } if (!deletes.isEmpty()) { - LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " - + TableName.META_TABLE_NAME); + LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " + + TableName.META_TABLE_NAME); metaTable.delete(deletes); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 892ef28ef23f..9b29d30b9b36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Supplier; @@ -29,20 +28,15 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @@ -346,8 +340,6 @@ private static boolean isDeleteColumnFamily(TableDescriptor originalDescriptor, * Action before modifying table. 
* @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void preModify(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { @@ -357,7 +349,6 @@ private void preModify(final MasterProcedureEnv env, final ModifyTableState stat /** * Update descriptor * @param env MasterProcedureEnv - * @throws IOException **/ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor); @@ -366,7 +357,6 @@ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOExcept /** * Removes from hdfs the families that are not longer present in the new table descriptor. * @param env MasterProcedureEnv - * @throws IOException */ private void deleteFromFs(final MasterProcedureEnv env, final TableDescriptor oldTableDescriptor, final TableDescriptor newTableDescriptor) @@ -386,61 +376,28 @@ private void deleteFromFs(final MasterProcedureEnv env, /** * update replica column families if necessary. - * @param env MasterProcedureEnv - * @throws IOException */ - private void updateReplicaColumnsIfNeeded( - final MasterProcedureEnv env, - final TableDescriptor oldTableDescriptor, - final TableDescriptor newTableDescriptor) throws IOException { + private void updateReplicaColumnsIfNeeded(MasterProcedureEnv env, + TableDescriptor oldTableDescriptor, TableDescriptor newTableDescriptor) throws IOException { final int oldReplicaCount = oldTableDescriptor.getRegionReplication(); final int newReplicaCount = newTableDescriptor.getRegionReplication(); - - if (newReplicaCount < oldReplicaCount) { - Set tableRows = new HashSet<>(); - Connection connection = env.getMasterServices().getConnection(); - Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName()); - scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - - try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { - ResultScanner resScanner = metaTable.getScanner(scan); - for (Result result : resScanner) { - tableRows.add(result.getRow()); - } - MetaTableAccessor.removeRegionReplicasFromMeta( - tableRows, - newReplicaCount, - oldReplicaCount - newReplicaCount, - connection); - } - } - if (newReplicaCount > oldReplicaCount) { - Connection connection = env.getMasterServices().getConnection(); - // Get the existing table regions - List existingTableRegions = - MetaTableAccessor.getTableRegions(connection, getTableName()); - // add all the new entries to the meta table - addRegionsToMeta(env, newTableDescriptor, existingTableRegions); - if (oldReplicaCount <= 1) { - // The table has been newly enabled for replica. So check if we need to setup - // region replication - ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); + env.getAssignmentManager().getRegionStateStore().updateRegionReplicas(getTableName(), + oldReplicaCount, newReplicaCount); + if (newReplicaCount > oldReplicaCount && oldReplicaCount <= 1) { + // The table has been newly enabled for replica. 
So check if we need to setup + // region replication + try { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices()); + } catch (ReplicationException e) { + throw new HBaseIOException(e); + } } } - private static void addRegionsToMeta(final MasterProcedureEnv env, - final TableDescriptor tableDescriptor, final List<RegionInfo> regionInfos) - throws IOException { - MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), regionInfos, - tableDescriptor.getRegionReplication()); - } /** * Action after modifying table. * @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void postModify(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { @@ -451,8 +408,6 @@ private void postModify(final MasterProcedureEnv env, final ModifyTableState sta * Coprocessor Action. * @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index b83749d9c337..fbd8d30bba66 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -22,16 +22,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.Reference; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint; import org.apache.hadoop.hbase.zookeeper.ZKConfig; @@ -155,34 +154,24 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, /** * Create replication peer for replicating to region replicas if needed. - * @param conf configuration to use - * @throws IOException + * <p/>
+ * This method should only be called on the master side. */ - public static void setupRegionReplicaReplication(Configuration conf) throws IOException { - if (!isRegionReplicaReplicationEnabled(conf)) { + public static void setupRegionReplicaReplication(MasterServices services) + throws IOException, ReplicationException { + if (!isRegionReplicaReplicationEnabled(services.getConfiguration())) { return; } - - try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { - ReplicationPeerConfig peerConfig = null; - try { - peerConfig = admin.getReplicationPeerConfig(REGION_REPLICA_REPLICATION_PEER); - } catch (ReplicationPeerNotFoundException e) { - LOG.warn( - "Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + " not exist", - e); - } - - if (peerConfig == null) { - LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER - + " not exist. Creating..."); - peerConfig = new ReplicationPeerConfig(); - peerConfig.setClusterKey(ZKConfig.getZooKeeperClusterKey(conf)); - peerConfig.setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()); - admin.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig); - } + if (services.getReplicationPeerManager().getPeerConfig(REGION_REPLICA_REPLICATION_PEER) + .isPresent()) { + return; + } + LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + + " not exist. Creating..."); + ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(services.getConfiguration())) + .setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()).build(); + services.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig, true); } public static boolean isRegionReplicaReplicationEnabled(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index f60acd732334..528b155cb94c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -2489,7 +2489,7 @@ public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf, .setStartKey(startKeys[i]) .setEndKey(startKeys[j]) .build(); - MetaTableAccessor.addRegionToMeta(getConnection(), hri); + MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1); newRegions.add(hri); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java index dc4b6a85a9b9..28ce7d8dae0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java @@ -29,9 +29,8 @@ import static org.mockito.Mockito.verify; import java.io.IOException; -import java.util.HashMap; +import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Connection; @@ -67,7 +66,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** * Test {@link org.apache.hadoop.hbase.MetaTableAccessor}.
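The rewritten setupRegionReplicaReplication above is deliberately idempotent: rather than opening a throwaway Connection and Admin just to probe for the peer, it asks the master's own ReplicationPeerManager whether the peer already exists and returns early if so. A small, self-contained sketch of that check-then-create shape (hypothetical names, not the HBase API):

import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical model of idempotent peer setup: the manager already tracks
// existing peers, so repeated setup calls are cheap no-ops.
public class PeerSetupSketch {
  private final Map<String, String> peers = new ConcurrentHashMap<>();

  Optional<String> getPeerConfig(String peerId) {
    return Optional.ofNullable(peers.get(peerId));
  }

  void setupIfAbsent(String peerId, String endpointClassName) {
    if (getPeerConfig(peerId).isPresent()) {
      return; // peer was created by an earlier call; nothing to do
    }
    peers.put(peerId, endpointClassName);
  }

  public static void main(String[] args) {
    PeerSetupSketch sketch = new PeerSetupSketch();
    sketch.setupIfAbsent("region_replica_replication", "RegionReplicaReplicationEndpoint");
    sketch.setupIfAbsent("region_replica_replication", "RegionReplicaReplicationEndpoint");
    System.out.println(sketch.peers.size()); // prints 1: the second call was a no-op
  }
}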
@@ -143,9 +141,11 @@ void metaTask() throws Throwable { } }; MetaTask writer = new MetaTask(connection, "writer") { + @Override - void metaTask() throws Throwable { - MetaTableAccessor.addRegionToMeta(connection, regions.get(0)); + void metaTask() throws IOException { + MetaTableAccessor.addRegionsToMeta(connection, Collections.singletonList(regions.get(0)), + 1); LOG.info("Wrote " + regions.get(0).getEncodedName()); } }; @@ -377,44 +377,6 @@ public static void assertEmptyMetaLocation(Table meta, byte[] row, int replicaId assertEquals(0, startCodeCell.getValueLength()); } - @Test - public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException { - long regionId = System.currentTimeMillis(); - RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) - .setRegionId(regionId).setReplicaId(0).build(); - - Table meta = MetaTableAccessor.getMetaHTable(connection); - try { - List regionInfos = Lists.newArrayList(primary); - MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3); - MetaTableAccessor.removeRegionReplicasFromMeta(Sets.newHashSet(primary.getRegionName()), 1, 2, - connection); - Get get = new Get(primary.getRegionName()); - Result result = meta.get(get); - for (int replicaId = 0; replicaId < 3; replicaId++) { - Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerColumn(replicaId)); - Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getStartCodeColumn(replicaId)); - Cell stateCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getRegionStateColumn(replicaId)); - Cell snCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerNameColumn(replicaId)); - if (replicaId == 0) { - assertNotNull(stateCell); - } else { - assertNull(serverCell); - assertNull(startCodeCell); - assertNull(stateCell); - assertNull(snCell); - } - } - } finally { - meta.close(); - } - } - @Test public void testMetaLocationForRegionReplicasIsAddedAtTableCreation() throws IOException { long regionId = System.currentTimeMillis(); @@ -434,14 +396,6 @@ public void testMetaLocationForRegionReplicasIsAddedAtTableCreation() throws IOE } } - private Map getMapOfRegionsToSeqNum(RegionInfo... 
regions) { - Map mids = new HashMap<>(regions.length); - for (RegionInfo region : regions) { - mids.put(region, -1L); - } - return mids; - } - @Test public void testMetaScanner() throws Exception { LOG.info("Starting " + name.getMethodName()); @@ -454,7 +408,7 @@ public void testMetaScanner() throws Exception { UTIL.createTable(tableName, FAMILY, SPLIT_KEYS); Table table = connection.getTable(tableName); // Make sure all the regions are deployed - UTIL.countRows(table); + HBaseTestingUtility.countRows(table); ClientMetaTableAccessor.Visitor visitor = mock(ClientMetaTableAccessor.Visitor.class); doReturn(true).when(visitor).visit((Result) anyObject()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java index 166fade9b654..25e8be246448 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java @@ -94,7 +94,7 @@ public void testDeleteForSureClearsAllTableRowsFromMeta() // content from a few of the rows. try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { try (ResultScanner scanner = metaTable.getScanner( - MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName))) { + MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) { for (Result result : scanner) { // Just delete one row. Delete d = new Delete(result.getRow()); @@ -114,8 +114,8 @@ public void testDeleteForSureClearsAllTableRowsFromMeta() fail("Got an exception while deleting " + tableName); } int rowCount = 0; - try (ResultScanner scanner = metaTable - .getScanner(MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName))) { + try (ResultScanner scanner = metaTable.getScanner( + MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) { for (Result result : scanner) { LOG.info("Found when none expected: " + result); rowCount++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java index 0f4e97fd7532..b7dd87b54e0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import java.util.Collections; import java.util.concurrent.Executors; import java.util.concurrent.Future; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -299,7 +300,8 @@ public void testLoadRegionFromMetaAfterRegionManuallyAdded() throws Exception { RegionInfo hri = createRegionInfo(tableName, 1); assertNull("RegionInfo was just instantiated by the test, but " + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri)); - MetaTableAccessor.addRegionToMeta(this.util.getConnection(), hri); + MetaTableAccessor.addRegionsToMeta(this.util.getConnection(), Collections.singletonList(hri), + 1); assertNull("RegionInfo was manually added in META, but " + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri)); hri = am.loadRegionFromMeta(hri.getEncodedName()); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java index 05451260a9be..a53771d46c7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java @@ -394,4 +394,41 @@ public void testAddMergeRegions() throws IOException { previousQualifier = qualifier; } } + + @Test + public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException { + long regionId = System.currentTimeMillis(); + TableName tableName = name.getTableName(); + RegionInfo primary = RegionInfoBuilder.newBuilder(tableName) + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) + .setRegionId(regionId).setReplicaId(0).build(); + + try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) { + List regionInfos = Lists.newArrayList(primary); + MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3); + final RegionStateStore regionStateStore = + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + regionStateStore.updateRegionReplicas(tableName, 3, 1); + Get get = new Get(primary.getRegionName()); + Result result = meta.get(get); + for (int replicaId = 0; replicaId < 3; replicaId++) { + Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getServerColumn(replicaId)); + Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getStartCodeColumn(replicaId)); + Cell stateCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getRegionStateColumn(replicaId)); + Cell snCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getServerNameColumn(replicaId)); + if (replicaId == 0) { + assertNotNull(stateCell); + } else { + assertNull(serverCell); + assertNull(startCodeCell); + assertNull(stateCell); + assertNull(snCell); + } + } + } + } } From 16251dbb53b0216ea7e48e75b770cdea519c9ce8 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Mon, 5 Oct 2020 11:51:59 -0700 Subject: [PATCH 105/769] HBASE-25143 Remove branch-1.3 from precommit and docs (#2491) Following the announcement [0] to EOL branch-1.3, update the precommit script to not consider this branch any longer, and refresh mentions of this branch in the doc. [0]: https://lists.apache.org/thread.html/r9552e9085aaac2a43f8b26b866d34825a84a9be7f19118ac560d14de%40%3Cuser.hbase.apache.org%3E Signed-off-by: Viraj Jasani Signed-off-by: Jan Hentschel --- dev-support/hbase-personality.sh | 9 +-------- src/main/asciidoc/_chapters/community.adoc | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 6f1355cf31a1..d9d11a83befa 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -553,14 +553,7 @@ function hadoopcheck_rebuild # All supported Hadoop versions that we want to test the compilation with # See the Hadoop section on prereqs in the HBase Reference Guide - if [[ "${PATCH_BRANCH}" = branch-1.3 ]]; then - yetus_info "Setting Hadoop 2 versions to test based on branch-1.3 rules." 
- if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.7" - else - hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7" - fi - elif [[ "${PATCH_BRANCH}" = branch-1.4 ]]; then + if [[ "${PATCH_BRANCH}" = branch-1.4 ]]; then yetus_info "Setting Hadoop 2 versions to test based on branch-1.4 rules." if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then hbase_hadoop2_versions="2.7.7" diff --git a/src/main/asciidoc/_chapters/community.adoc b/src/main/asciidoc/_chapters/community.adoc index 339fa6e90b7c..3db648238a05 100644 --- a/src/main/asciidoc/_chapters/community.adoc +++ b/src/main/asciidoc/_chapters/community.adoc @@ -43,13 +43,18 @@ See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts [[hbase.fix.version.in.jira]] .How to set fix version in JIRA on issue resolve -Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set versions in JIRA when we resolve an issue. -If master is going to be 2.0.0, and branch-1 1.4.0 then: - -* Commit only to master: Mark with 2.0.0 -* Commit to branch-1 and master: Mark with 2.0.0, and 1.4.0 -* Commit to branch-1.3, branch-1, and master: Mark with 2.0.0, 1.4.0, and 1.3.x -* Commit site fixes: no version +Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set versions in JIRA when we +resolve an issue. If master is going to be 3.0.0, branch-2 will be 2.4.0, and branch-1 will be +1.7.0 then: + +* Commit only to master (i.e., backward-incompatible new feature): Mark with 3.0.0 +* Commit only to master and branch-2 (i.e., backward-compatible new feature, applicable only to + 2.x+): Mark with 3.0.0 and 2.4.0 +* Commit to master, branch-2, and branch-1 (i.e., backward-compatible new feature, applicable + everywhere): Mark with 3.0.0, 2.4.0, and 1.7.0 +* Commit to master, branch-2, and branch-2.3, branch-2.2, branch-2, branch-1.4 (i.e., bug fix + applicable to all active release lines): Mark with 3.0.0, 2.4.0, 2.3.x, 2.2.x, 1.7.0, and 1.4.x +* Commit a fix to the website: no version [[hbase.when.to.close.jira]] .Policy on when to set a RESOLVED JIRA as CLOSED From 16251dbb53b0216ea7e48e75b770cdea519c9ce8 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 6 Oct 2020 08:35:16 +0800 Subject: [PATCH 106/769] HBASE-25154 Set java.io.tmpdir to project build directory to avoid writing std*deferred files to /tmp (#2502) Signed-off-by: stack Signed-off-by: Viraj Jasani Signed-off-by: Sean Busbey --- pom.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pom.xml b/pom.xml index 088a7e4898e4..75fa9ceb3612 100755 --- a/pom.xml +++ b/pom.xml @@ -761,6 +761,7 @@ <test.output.tofile>${test.output.tofile}</test.output.tofile> <test.build.classes>${test.build.classes}</test.build.classes> + <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir> <test.build.classes>${project.build.directory}/test-classes</test.build.classes> + <test.tmp.dir>${project.build.directory}</test.tmp.dir> <maven.build.timestamp.format>yyyy-MM-dd'T'HH:mm:ss'Z'</maven.build.timestamp.format> From aff8bbf0cb7a6c78c62df5ec5fc29bbaca94a410 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Thu, 8 Oct 2020 17:04:48 +0900 Subject: [PATCH 113/769] HBASE-25160 Refactor AccessController and VisibilityController (#2506) Signed-off-by: stack --- .../security/access/AccessController.java | 66 ++++--------------- .../visibility/VisibilityController.java | 66 +------------------ 2 files changed, 13 insertions(+), 119 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index cb664bb2d2f8..3779903f869a 100644 ---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -429,7 +429,6 @@ private enum OpType { DELETE("delete"), CHECK_AND_PUT("checkAndPut"), CHECK_AND_DELETE("checkAndDelete"), - INCREMENT_COLUMN_VALUE("incrementColumnValue"), APPEND("append"), INCREMENT("increment"); @@ -1503,18 +1502,27 @@ public void preBatchMutate(ObserverContext c, // We have a failure with table, cf and q perm checks and now giving a chance for cell // perm check OpType opType; + long timestamp; if (m instanceof Put) { checkForReservedTagPresence(user, m); opType = OpType.PUT; + timestamp = m.getTimestamp(); } else if (m instanceof Delete) { opType = OpType.DELETE; + timestamp = m.getTimestamp(); + } else if (m instanceof Increment) { + opType = OpType.INCREMENT; + timestamp = ((Increment) m).getTimeRange().getMax(); + } else if (m instanceof Append) { + opType = OpType.APPEND; + timestamp = ((Append) m).getTimeRange().getMax(); } else { - // If the operation type is not Put or Delete, do nothing + // If the operation type is not Put/Delete/Increment/Append, do nothing continue; } AuthResult authResult = null; if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), - m.getFamilyCellMap(), m.getTimestamp(), Action.WRITE)) { + m.getFamilyCellMap(), timestamp, Action.WRITE)) { authResult = AuthResult.allow(opType.toString(), "Covering cell set", user, Action.WRITE, table, m.getFamilyCellMap()); } else { @@ -1695,32 +1703,6 @@ public Result preAppend(ObserverContext c, Append return null; } - @Override - public Result preAppendAfterRowLock(final ObserverContext c, - final Append append) throws IOException { - if (append.getAttribute(CHECK_COVERING_PERM) != null) { - // We had failure with table, cf and q perm checks and now giving a chance for cell - // perm check - TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); - AuthResult authResult = null; - User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.APPEND, c.getEnvironment(), append.getRow(), - append.getFamilyCellMap(), append.getTimeRange().getMax(), Action.WRITE)) { - authResult = AuthResult.allow(OpType.APPEND.toString(), - "Covering cell set", user, Action.WRITE, table, append.getFamilyCellMap()); - } else { - authResult = AuthResult.deny(OpType.APPEND.toString(), - "Covering cell set", user, Action.WRITE, table, append.getFamilyCellMap()); - } - AccessChecker.logResult(authResult); - if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); - } - } - return null; - } - @Override public Result preIncrement(final ObserverContext c, final Increment increment) @@ -1756,32 +1738,6 @@ public Result preIncrement(final ObserverContext c return null; } - @Override - public Result preIncrementAfterRowLock(final ObserverContext c, - final Increment increment) throws IOException { - if (increment.getAttribute(CHECK_COVERING_PERM) != null) { - // We had failure with table, cf and q perm checks and now giving a chance for cell - // perm check - TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); - AuthResult authResult = null; - User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.INCREMENT, c.getEnvironment(), increment.getRow(), - increment.getFamilyCellMap(), increment.getTimeRange().getMax(), Action.WRITE)) { - authResult = 
AuthResult.allow(OpType.INCREMENT.toString(), "Covering cell set", - user, Action.WRITE, table, increment.getFamilyCellMap()); - } else { - authResult = AuthResult.deny(OpType.INCREMENT.toString(), "Covering cell set", - user, Action.WRITE, table, increment.getFamilyCellMap()); - } - AccessChecker.logResult(authResult); - if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); - } - } - return null; - } - @Override public List> postIncrementBeforeWAL( ObserverContext ctx, Mutation mutation, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 02ed4dd1df0b..37f25a83ea72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -45,11 +45,9 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -69,7 +67,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.FilterList; @@ -323,7 +320,7 @@ public void preBatchMutate(ObserverContext c, } } } - if (!sanityFailure) { + if (!sanityFailure && (m instanceof Put || m instanceof Delete)) { if (cellVisibility != null) { String labelsExp = cellVisibility.getExpression(); List visibilityTags = labelCache.get(labelsExp); @@ -360,7 +357,7 @@ public void preBatchMutate(ObserverContext c, if (m instanceof Put) { Put p = (Put) m; p.add(cell); - } else if (m instanceof Delete) { + } else { Delete d = (Delete) m; d.add(cell); } @@ -470,35 +467,6 @@ private Pair checkForReservedVisibilityTagPresence(Cell cell, return pair; } - /** - * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This - * tag type is reserved and should not be explicitly set by user. There are - * two versions of this method one that accepts pair and other without pair. - * In case of preAppend and preIncrement the additional operations are not - * needed like checking for STRING_VIS_TAG_TYPE and hence the API without pair - * could be used. - * - * @param cell - * @throws IOException - */ - private boolean checkForReservedVisibilityTagPresence(Cell cell) throws IOException { - // Bypass this check when the operation is done by a system/super user. 
- // This is done because, while Replication, the Cells coming to the peer - // cluster with reserved - // typed tags and this is fine and should get added to the peer cluster - // table - if (isSystemOrSuperUser()) { - return true; - } - Iterator tagsItr = PrivateCellUtil.tagsIterator(cell); - while (tagsItr.hasNext()) { - if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) { - return false; - } - } - return true; - } - private void removeReplicationVisibilityTag(List tags) throws IOException { Iterator iterator = tags.iterator(); while (iterator.hasNext()) { @@ -657,36 +625,6 @@ private boolean isSystemOrSuperUser() throws IOException { return Superusers.isSuperUser(VisibilityUtils.getActiveUser()); } - @Override - public Result preAppend(ObserverContext e, Append append) - throws IOException { - // If authorization is not enabled, we don't care about reserved tags - if (!authorizationEnabled) { - return null; - } - for (CellScanner cellScanner = append.cellScanner(); cellScanner.advance();) { - if (!checkForReservedVisibilityTagPresence(cellScanner.current())) { - throw new FailedSanityCheckException("Append contains cell with reserved type tag"); - } - } - return null; - } - - @Override - public Result preIncrement(ObserverContext e, Increment increment) - throws IOException { - // If authorization is not enabled, we don't care about reserved tags - if (!authorizationEnabled) { - return null; - } - for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance();) { - if (!checkForReservedVisibilityTagPresence(cellScanner.current())) { - throw new FailedSanityCheckException("Increment contains cell with reserved type tag"); - } - } - return null; - } - @Override public List> postIncrementBeforeWAL( ObserverContext ctx, Mutation mutation, From 0d63318f10638af6aaac16616ca741be57608e3f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 8 Oct 2020 21:35:29 +0800 Subject: [PATCH 114/769] HBASE-25124 Support changing region replica count without disabling table (#2497) Signed-off-by: stack --- .../server/master/MasterProcedure.proto | 2 + .../master/assignment/AssignmentManager.java | 59 ++++--- .../master/assignment/RegionStateStore.java | 41 +---- .../procedure/EnableTableProcedure.java | 53 +----- .../procedure/ModifyTableProcedure.java | 72 +++++--- .../hadoop/hbase/HBaseTestingUtility.java | 2 - .../hadoop/hbase/client/TestAdmin3.java | 20 --- .../assignment/TestRegionStateStore.java | 2 +- .../procedure/TestModifyTableProcedure.java | 4 +- .../TestRegionReplicasWithModifyTable.java | 155 ++++++------------ 10 files changed, 157 insertions(+), 253 deletions(-) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 59a1d68fda44..8d8b9af009cd 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -72,6 +72,8 @@ enum ModifyTableState { MODIFY_TABLE_DELETE_FS_LAYOUT = 5; MODIFY_TABLE_POST_OPERATION = 6; MODIFY_TABLE_REOPEN_ALL_REGIONS = 7; + MODIFY_TABLE_CLOSE_EXCESS_REPLICAS = 8; + MODIFY_TABLE_ASSIGN_NEW_REPLICAS = 9; } message ModifyTableStateData { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index d2e1eb3e9d42..fb64514a3377 100644 --- 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index d2e1eb3e9d42..fb64514a3377 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -870,32 +870,47 @@ private TransitRegionStateProcedure[] createAssignProcedures(
       .sorted(AssignmentManager::compare).toArray(TransitRegionStateProcedure[]::new);
   }
 
+  // for creating unassign TRSP when disabling a table or closing excess region replicas
+  private TransitRegionStateProcedure forceCreateUnssignProcedure(RegionStateNode regionNode) {
+    regionNode.lock();
+    try {
+      if (!regionStates.include(regionNode, false) ||
+        regionStates.isRegionOffline(regionNode.getRegionInfo())) {
+        return null;
+      }
+      // As in DisableTableProcedure or ModifyTableProcedure, we will hold the xlock for table, so
+      // we can make sure that this procedure has not been executed yet, as TRSP will hold the
+      // shared lock for table all the time. So here we will unset it and when it is actually
+      // executed, it will find that the attach procedure is not itself and quit immediately.
+      if (regionNode.getProcedure() != null) {
+        regionNode.unsetProcedure(regionNode.getProcedure());
+      }
+      return regionNode.setProcedure(TransitRegionStateProcedure.unassign(getProcedureEnvironment(),
+        regionNode.getRegionInfo()));
+    } finally {
+      regionNode.unlock();
+    }
+  }
+
   /**
    * Called by DisableTableProcedure to unassign all the regions for a table.
    */
   public TransitRegionStateProcedure[] createUnassignProceduresForDisabling(TableName tableName) {
-    return regionStates.getTableRegionStateNodes(tableName).stream().map(regionNode -> {
-      regionNode.lock();
-      try {
-        if (!regionStates.include(regionNode, false) ||
-          regionStates.isRegionOffline(regionNode.getRegionInfo())) {
-          return null;
-        }
-        // As in DisableTableProcedure, we will hold the xlock for table, so we can make sure that
-        // this procedure has not been executed yet, as TRSP will hold the shared lock for table all
-        // the time. So here we will unset it and when it is actually executed, it will find that
-        // the attach procedure is not itself and quit immediately.
-        if (regionNode.getProcedure() != null) {
-          regionNode.unsetProcedure(regionNode.getProcedure());
-        }
-        TransitRegionStateProcedure proc = TransitRegionStateProcedure
-          .unassign(getProcedureEnvironment(), regionNode.getRegionInfo());
-        regionNode.setProcedure(proc);
-        return proc;
-      } finally {
-        regionNode.unlock();
-      }
-    }).filter(p -> p != null).toArray(TransitRegionStateProcedure[]::new);
+    return regionStates.getTableRegionStateNodes(tableName).stream()
+      .map(this::forceCreateUnssignProcedure).filter(p -> p != null)
+      .toArray(TransitRegionStateProcedure[]::new);
+  }
+
+  /**
+   * Called by ModifyTableProcedures to unassign all the excess region replicas
+   * for a table.
+   */
+  public TransitRegionStateProcedure[] createUnassignProceduresForClosingExcessRegionReplicas(
+    TableName tableName, int newReplicaCount) {
+    return regionStates.getTableRegionStateNodes(tableName).stream()
+      .filter(regionNode -> regionNode.getRegionInfo().getReplicaId() >= newReplicaCount)
+      .map(this::forceCreateUnssignProcedure).filter(p -> p != null)
+      .toArray(TransitRegionStateProcedure[]::new);
   }
 
   public SplitTableRegionProcedure createSplitProcedure(final RegionInfo regionToSplit,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 500e5ec79da0..78f2bb75fe8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -547,25 +547,12 @@ public void overwriteRegions(List<RegionInfo> regionInfos, int regionReplication
     LOG.debug("Overwritten regions: {} ", regionInfos);
   }
 
-  /**
-   * Update region replicas if necessary by adding new replica locations or removing unused region
-   * replicas
-   */
-  public void updateRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
-    throws IOException {
-    if (newReplicaCount < oldReplicaCount) {
-      removeRegionReplicas(tableName, oldReplicaCount, newReplicaCount);
-    } else if (newReplicaCount > oldReplicaCount) {
-      addRegionReplicas(tableName, oldReplicaCount, newReplicaCount);
-    }
-  }
-
   private Scan getScanForUpdateRegionReplicas(TableName tableName) {
     return MetaTableAccessor.getScanForTableName(master.getConfiguration(), tableName)
       .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
   }
 
-  private void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
+  public void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
     throws IOException {
     Scan scan = getScanForUpdateRegionReplicas(tableName);
     List<Delete> deletes = new ArrayList<>();
@@ -598,32 +585,6 @@ private void removeRegionReplicas(TableName tableName, int oldReplicaCount, int
     }
   }
 
-  private void addRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
-    throws IOException {
-    Scan scan = getScanForUpdateRegionReplicas(tableName);
-    List<Put> puts = new ArrayList<>();
-    long now = EnvironmentEdgeManager.currentTime();
-    try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) {
-      for (;;) {
-        Result result = scanner.next();
-        if (result == null) {
-          break;
-        }
-        RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result);
-        if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) {
-          continue;
-        }
-        Put put = new Put(result.getRow(), now);
-        for (int i = oldReplicaCount; i < newReplicaCount; i++) {
-          MetaTableAccessor.addEmptyLocation(put, i);
-        }
-        puts.add(put);
-      }
-      debugLogMutations(puts);
-      metaTable.put(puts);
-    }
-  }
-
   // ==========================================================================
   //  Table Descriptors helpers
   // ==========================================================================
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
index 6ca83fe01efe..1e48981e417c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -20,17 +20,11 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import org.apache.hadoop.hbase.CatalogFamilyFormat;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -100,7 +94,6 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS
       case ENABLE_TABLE_MARK_REGIONS_ONLINE:
         // Get the region replica count. If changed since disable, need to do
         // more work assigning.
-        Connection connection = env.getMasterServices().getConnection();
         TableDescriptor tableDescriptor =
           env.getMasterServices().getTableDescriptors().get(tableName);
         int configuredReplicaCount = tableDescriptor.getRegionReplication();
@@ -111,25 +104,16 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS
         // How many replicas do we currently have? Check regions returned from
         // in-memory state.
         int currentMaxReplica = getMaxReplicaId(regionsOfTable);
-
-        // Read the META table to know the number of replicas the table currently has.
-        // If there was a table modification on region replica count then need to
-        // adjust replica counts here.
-        int replicasFound = TableName.isMetaTableName(this.tableName)?
-            0: // TODO: Figure better what to do here for hbase:meta replica.
-            getReplicaCountInMeta(connection, configuredReplicaCount, regionsOfTable);
-        LOG.info("replicasFound={} (configuredReplicaCount={} for {}", replicasFound,
-          configuredReplicaCount, tableName.getNameAsString());
-        if (currentMaxReplica == (configuredReplicaCount - 1)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("No change in number of region replicas (configuredReplicaCount={});"
-                + " assigning.", configuredReplicaCount);
-          }
+        if (currentMaxReplica == configuredReplicaCount - 1) {
+          LOG.debug("No change in number of region replicas (configuredReplicaCount={});"
+            + " assigning.", configuredReplicaCount);
         } else if (currentMaxReplica > (configuredReplicaCount - 1)) {
           // We have additional regions as the replica count has been decreased. Delete
           // those regions because already the table is in the unassigned state
-          LOG.info("The number of replicas " + (currentMaxReplica + 1)
-              + " is more than the region replica count " + configuredReplicaCount);
+          LOG.warn(
+            "The number of replicas {} is more than the region replica count {}"
+              + ", usually this should not happen as we will delete them in ModifyTableProcedure",
+            currentMaxReplica + 1, configuredReplicaCount);
           List<RegionInfo> copyOfRegions = new ArrayList<RegionInfo>(regionsOfTable);
           for (RegionInfo regionInfo : copyOfRegions) {
             if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) {
@@ -140,11 +124,11 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS
               regionsOfTable.remove(regionInfo);
             }
           }
-        } else {
+        } else if (currentMaxReplica < configuredReplicaCount - 1) {
           // the replicasFound is less than the regionReplication
           LOG.info("Number of replicas has increased for {}. Assigning new region replicas."
             + "The previous replica count was {}. The current replica count is {}.",
-            this.tableName, (currentMaxReplica + 1), configuredReplicaCount);
+            this.tableName, currentMaxReplica + 1, configuredReplicaCount);
           regionsOfTable = RegionReplicaUtil.addReplicas(regionsOfTable,
             currentMaxReplica + 1, configuredReplicaCount);
         }
@@ -174,25 +158,6 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS
     return Flow.HAS_MORE_STATE;
   }
 
-  /**
-   * @return Count of replicas found reading hbase:meta Region row or zk if
-   *   asking about the hbase:meta table itself..
-   */
-  private int getReplicaCountInMeta(Connection connection, int regionReplicaCount,
-      List<RegionInfo> regionsOfTable) throws IOException {
-    Result r = MetaTableAccessor.getCatalogFamilyRow(connection, regionsOfTable.get(0));
-    int replicasFound = 0;
-    for (int i = 1; i < regionReplicaCount; i++) {
-      // Since we have already added the entries to the META we will be getting only that here
-      List<Cell> columnCells =
-          r.getColumnCells(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i));
-      if (!columnCells.isEmpty()) {
-        replicasFound++;
-      }
-    }
-    return replicasFound;
-  }
-
   @Override
   protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state)
       throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 9b29d30b9b36..beb129b6f52b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -24,16 +24,17 @@
 import java.util.List;
 import java.util.Set;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 import org.apache.hadoop.hbase.ConcurrentTableModificationException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -128,6 +129,12 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS
           break;
         case MODIFY_TABLE_PRE_OPERATION:
           preModify(env, state);
+          setNextState(ModifyTableState.MODIFY_TABLE_CLOSE_EXCESS_REPLICAS);
+          break;
+        case MODIFY_TABLE_CLOSE_EXCESS_REPLICAS:
+          if (isTableEnabled(env)) {
+            closeExcessReplicasIfNeeded(env);
+          }
           setNextState(ModifyTableState.MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR);
           break;
         case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR:
@@ -135,7 +142,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS
           setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN);
           break;
        case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
-          updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
+          removeReplicaColumnsIfNeeded(env);
           setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
           break;
         case MODIFY_TABLE_POST_OPERATION:
@@ -146,6 +153,10 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS
           if (isTableEnabled(env)) {
             addChildProcedure(new ReopenTableRegionsProcedure(getTableName()));
           }
+          setNextState(ModifyTableState.MODIFY_TABLE_ASSIGN_NEW_REPLICAS);
+          break;
+        case MODIFY_TABLE_ASSIGN_NEW_REPLICAS:
+          assignNewReplicasIfNeeded(env);
           if (deleteColumnFamilyInModify) {
             setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
           } else {
@@ -297,14 +308,6 @@ private void prepareModify(final MasterProcedureEnv env) throws IOException {
         env.getMasterServices().getTableDescriptors().get(getTableName());
     }
 
-    if (env.getMasterServices().getTableStateManager()
-        .isTableState(getTableName(), TableState.State.ENABLED)) {
-      if (modifiedTableDescriptor.getRegionReplication() != unmodifiedTableDescriptor
-          .getRegionReplication()) {
-        throw new TableNotDisabledException(
-          "REGION_REPLICATION change is not supported for enabled tables");
-      }
-    }
     this.deleteColumnFamilyInModify = isDeleteColumnFamily(unmodifiedTableDescriptor,
       modifiedTableDescriptor);
     if (!unmodifiedTableDescriptor.getRegionServerGroup()
@@ -375,17 +378,36 @@ private void deleteFromFs(final MasterProcedureEnv env,
   }
 
   /**
-   * update replica column families if necessary.
+   * remove replica columns if necessary.
    */
-  private void updateReplicaColumnsIfNeeded(MasterProcedureEnv env,
-    TableDescriptor oldTableDescriptor, TableDescriptor newTableDescriptor) throws IOException {
-    final int oldReplicaCount = oldTableDescriptor.getRegionReplication();
-    final int newReplicaCount = newTableDescriptor.getRegionReplication();
-    env.getAssignmentManager().getRegionStateStore().updateRegionReplicas(getTableName(),
+  private void removeReplicaColumnsIfNeeded(MasterProcedureEnv env) throws IOException {
+    final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication();
+    final int newReplicaCount = modifiedTableDescriptor.getRegionReplication();
+    if (newReplicaCount >= oldReplicaCount) {
+      return;
+    }
+    env.getAssignmentManager().getRegionStateStore().removeRegionReplicas(getTableName(),
       oldReplicaCount, newReplicaCount);
-    if (newReplicaCount > oldReplicaCount && oldReplicaCount <= 1) {
-      // The table has been newly enabled for replica. So check if we need to setup
-      // region replication
+    env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName()).stream()
+      .filter(r -> r.getReplicaId() >= newReplicaCount)
+      .forEach(env.getAssignmentManager().getRegionStates()::deleteRegion);
+  }
+
+  private void assignNewReplicasIfNeeded(MasterProcedureEnv env) throws IOException {
+    final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication();
+    final int newReplicaCount = modifiedTableDescriptor.getRegionReplication();
+    if (newReplicaCount <= oldReplicaCount) {
+      return;
+    }
+    if (isTableEnabled(env)) {
+      List<RegionInfo> newReplicas = env.getAssignmentManager().getRegionStates()
+        .getRegionsOfTable(getTableName()).stream().filter(RegionReplicaUtil::isDefaultReplica)
+        .flatMap(primaryRegion -> IntStream.range(oldReplicaCount, newReplicaCount).mapToObj(
+          replicaId -> RegionReplicaUtil.getRegionInfoForReplica(primaryRegion, replicaId)))
+        .collect(Collectors.toList());
+      addChildProcedure(env.getAssignmentManager().createAssignProcedures(newReplicas));
+    }
+    if (oldReplicaCount <= 1) {
       try {
         ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices());
       } catch (ReplicationException e) {
@@ -394,6 +416,16 @@ private void updateReplicaColumnsIfNeeded(MasterProcedureEnv env,
     }
   }
 
+  private void closeExcessReplicasIfNeeded(MasterProcedureEnv env) {
+    final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication();
+    final int newReplicaCount = modifiedTableDescriptor.getRegionReplication();
+    if (newReplicaCount >= oldReplicaCount) {
+      return;
+    }
+    addChildProcedure(env.getAssignmentManager()
+      .createUnassignProceduresForClosingExcessRegionReplicas(getTableName(), newReplicaCount));
+  }
+
   /**
    * Action after modifying table.
    * @param env MasterProcedureEnv
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 528b155cb94c..1fb2f00d67cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1846,11 +1846,9 @@ public static void modifyTableSync(Admin admin, TableDescriptor desc)
    */
   public static void setReplicas(Admin admin, TableName table, int replicaCount)
       throws IOException, InterruptedException {
-    admin.disableTable(table);
     TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
       .setRegionReplication(replicaCount).build();
     admin.modifyTable(desc);
-    admin.enableTable(table);
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java
index c648db94aa39..441d40194a9d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java
@@ -263,26 +263,6 @@ public void testReadOnlyTableModify() throws IOException, InterruptedException {
     assertFalse(ADMIN.tableExists(tableName));
   }
 
-  @Test(expected = TableNotDisabledException.class)
-  public void testModifyRegionReplicasEnabledTable() throws Exception {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
-
-    // Modify region replication count
-    TableDescriptor htd = TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName))
-        .setRegionReplication(3).build();
-    try {
-      // try to modify the region replication count without disabling the table
-      ADMIN.modifyTable(htd);
-      fail("Expected an exception");
-    } finally {
-      // Delete the table
-      ADMIN.disableTable(tableName);
-      ADMIN.deleteTable(tableName);
-      assertFalse(ADMIN.tableExists(tableName));
-    }
-  }
-
   @Test
   public void testDeleteLastColumnFamily() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
index a53771d46c7b..ad1340104a98 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
@@ -408,7 +408,7 @@ public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws I
     MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3);
     final RegionStateStore regionStateStore =
       UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
-    regionStateStore.updateRegionReplicas(tableName, 3, 1);
+    regionStateStore.removeRegionReplicas(tableName, 3, 1);
     Get get = new Get(primary.getRegionName());
     Result result = meta.get(get);
     for (int replicaId = 0; replicaId < 3; replicaId++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
index 4461f2f0ede4..f5cc543e86d4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
@@ -371,7 +371,7 @@ public void testRollbackAndDoubleExecutionOnline() throws Exception {
     long procId = procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(),
       newTd));
 
-    int lastStep = 3; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR
+    int lastStep = 8; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR
     MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep);
 
     // cf2 should not be present
@@ -404,7 +404,7 @@ public void testRollbackAndDoubleExecutionOffline() throws Exception {
       new ModifyTableProcedure(procExec.getEnvironment(), newTd));
 
     // Restart the executor and rollback the step twice
-    int lastStep = 3; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR
+    int lastStep = 8; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR
     MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep);
 
     // cf2 should not be present
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java
index f01038eee70c..4ca8059bc285 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java
@@ -20,51 +20,64 @@
 import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.TableNameTestRule;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.RegionSplitter;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
 
+@RunWith(Parameterized.class)
 @Category({ RegionServerTests.class, MediumTests.class })
 public class TestRegionReplicasWithModifyTable {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestRegionReplicasWithModifyTable.class);
+    HBaseClassTestRule.forClass(TestRegionReplicasWithModifyTable.class);
 
   private static final int NB_SERVERS = 3;
-  private static Table table;
 
   private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
   private static final byte[] f = HConstants.CATALOG_FAMILY;
 
+  @Parameter
+  public boolean disableBeforeModifying;
+
   @Rule
-  public TestName name = new TestName();
+  public TableNameTestRule name = new TableNameTestRule();
+
+  @Parameters
+  public static List<Object[]> params() {
+    return Arrays.asList(new Object[] { true }, new Object[] { false });
+  }
 
   @BeforeClass
   public static void before() throws Exception {
     HTU.startMiniCluster(NB_SERVERS);
   }
 
-  private static void enableReplicationByModification(final TableName tableName,
-      boolean withReplica, int initialReplicaCount, int enableReplicaCount, int splitCount)
-      throws IOException, InterruptedException {
+  private void enableReplicationByModification(boolean withReplica, int initialReplicaCount,
+    int enableReplicaCount, int splitCount) throws IOException, InterruptedException {
+    TableName tableName = name.getTableName();
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
     if (withReplica) {
       builder.setRegionReplication(initialReplicaCount);
@@ -72,14 +85,25 @@ private static void enableReplicationByModification(final TableName tableName,
     TableDescriptor htd = builder.build();
     if (splitCount > 0) {
       byte[][] splits = getSplits(splitCount);
-      table = HTU.createTable(htd, new byte[][] { f }, splits,
-        new Configuration(HTU.getConfiguration()));
-
+      HTU.createTable(htd, new byte[][] { f }, splits, new Configuration(HTU.getConfiguration()));
     } else {
-      table = HTU.createTable(htd, new byte[][] { f }, (byte[][]) null,
+      HTU.createTable(htd, new byte[][] { f }, (byte[][]) null,
         new Configuration(HTU.getConfiguration()));
     }
-    HBaseTestingUtility.setReplicas(HTU.getAdmin(), table.getName(), enableReplicaCount);
+    if (disableBeforeModifying) {
+      HTU.getAdmin().disableTable(tableName);
+    }
+    HBaseTestingUtility.setReplicas(HTU.getAdmin(), tableName, enableReplicaCount);
+    if (disableBeforeModifying) {
+      HTU.getAdmin().enableTable(tableName);
+    }
+    int expectedRegionCount;
+    if (splitCount > 0) {
+      expectedRegionCount = enableReplicaCount * splitCount;
+    } else {
+      expectedRegionCount = enableReplicaCount;
+    }
+    assertTotalRegions(expectedRegionCount);
   }
 
   private static byte[][] getSplits(int numRegions) {
@@ -91,123 +115,50 @@ private static byte[][] getSplits(int numRegions) {
 
   @AfterClass
   public static void afterClass() throws Exception {
-    HRegionServer.TEST_SKIP_REPORTING_TRANSITION = false;
-    table.close();
     HTU.shutdownMiniCluster();
   }
 
-  private HRegionServer getRS() {
-    return HTU.getMiniHBaseCluster().getRegionServer(0);
-  }
-
-  private HRegionServer getSecondaryRS() {
-    return HTU.getMiniHBaseCluster().getRegionServer(1);
+  @After
+  public void tearDown() throws IOException {
+    TableName tableName = name.getTableName();
+    HTU.getAdmin().disableTable(tableName);
+    HTU.getAdmin().deleteTable(tableName);
   }
 
-  private HRegionServer getTertiaryRS() {
-    return HTU.getMiniHBaseCluster().getRegionServer(2);
+  private void assertTotalRegions(int expected) {
+    int actual = HTU.getHBaseCluster().getRegions(name.getTableName()).size();
+    assertEquals(expected, actual);
   }
 
   @Test
   public void testRegionReplicasUsingEnableTable() throws Exception {
-    TableName tableName = null;
-    try {
-      tableName = TableName.valueOf(name.getMethodName());
-      enableReplicationByModification(tableName, false, 0, 3, 0);
-      List<HRegion> onlineRegions = getRS().getRegions(tableName);
-      List<HRegion> onlineRegions2 = getSecondaryRS().getRegions(tableName);
-      List<HRegion> onlineRegions3 = getTertiaryRS().getRegions(tableName);
-      int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-      assertEquals("the number of regions should be more than 1", 3, totalRegions);
-    } finally {
-      disableAndDeleteTable(tableName);
-    }
-  }
-
-  private void disableAndDeleteTable(TableName tableName) throws IOException {
-    HTU.getAdmin().disableTable(tableName);
-    HTU.getAdmin().deleteTable(tableName);
+    enableReplicationByModification(false, 0, 3, 0);
   }
 
   @Test
   public void testRegionReplicasUsingEnableTableForMultipleRegions() throws Exception {
-    TableName tableName = null;
-    try {
-      tableName = TableName.valueOf(name.getMethodName());
-      enableReplicationByModification(tableName, false, 0, 3, 10);
-      List<HRegion> onlineRegions = getRS().getRegions(tableName);
-      List<HRegion> onlineRegions2 = getSecondaryRS().getRegions(tableName);
-      List<HRegion> onlineRegions3 = getTertiaryRS().getRegions(tableName);
-      int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-      assertEquals("the number of regions should be equal to 30", 30, totalRegions);
-    } finally {
-      disableAndDeleteTable(tableName);
-    }
+    enableReplicationByModification(false, 0, 3, 10);
   }
 
   @Test
   public void testRegionReplicasByEnableTableWhenReplicaCountIsIncreased() throws Exception {
-    TableName tableName = null;
-    try {
-      tableName = TableName.valueOf(name.getMethodName());
-      enableReplicationByModification(tableName, true, 2, 3, 0);
-      List<HRegion> onlineRegions = getRS().getRegions(tableName);
-      List<HRegion> onlineRegions2 = getSecondaryRS().getRegions(tableName);
-      List<HRegion> onlineRegions3 = getTertiaryRS().getRegions(tableName);
-      int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-      assertEquals("the number of regions should be 3", 3, totalRegions);
-    } finally {
-      disableAndDeleteTable(tableName);
-    }
+    enableReplicationByModification(true, 2, 3, 0);
   }
 
   @Test
   public void testRegionReplicasByEnableTableWhenReplicaCountIsDecreased() throws Exception {
-    TableName tableName = null;
-    try {
-      tableName = TableName.valueOf(name.getMethodName());
-      enableReplicationByModification(tableName, true, 3, 2, 0);
-      List<HRegion> onlineRegions = getRS().getRegions(tableName);
-      List<HRegion> onlineRegions2 = getSecondaryRS().getRegions(tableName);
-      List<HRegion> onlineRegions3 = getTertiaryRS().getRegions(tableName);
-      int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-      assertEquals("the number of regions should be reduced to 2", 2, totalRegions);
-    } finally {
-      disableAndDeleteTable(tableName);
-    }
+    enableReplicationByModification(true, 3, 2, 0);
   }
 
   @Test
   public void testRegionReplicasByEnableTableWhenReplicaCountIsDecreasedWithMultipleRegions()
-      throws Exception {
-    TableName tableName = null;
-    try {
-      tableName = TableName.valueOf(name.getMethodName());
-      enableReplicationByModification(tableName, true, 3, 2, 20);
-      List<HRegion> onlineRegions = getRS().getRegions(tableName);
-      List<HRegion> onlineRegions2 = getSecondaryRS().getRegions(tableName);
-      List<HRegion> onlineRegions3 = getTertiaryRS().getRegions(tableName);
-      int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-      assertEquals("the number of regions should be reduced to 40", 40, totalRegions);
-    } finally {
-      disableAndDeleteTable(tableName);
-    }
+    throws Exception {
+    enableReplicationByModification(true, 3, 2, 20);
   }
 
   @Test
   public void testRegionReplicasByEnableTableWhenReplicaCountIsIncreasedWithmultipleRegions()
-      throws Exception {
-    TableName tableName = null;
-    try {
-      tableName = TableName.valueOf(name.getMethodName());
-      enableReplicationByModification(tableName, true, 2, 3, 15);
-      List<HRegion> onlineRegions = getRS().getRegions(tableName);
-      List<HRegion> onlineRegions2 = getSecondaryRS().getRegions(tableName);
-      List<HRegion> onlineRegions3 = getTertiaryRS().getRegions(tableName);
-      int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-      assertEquals("the number of regions should be equal to 45", 3 * 15, totalRegions);
-    } finally {
-      disableAndDeleteTable(tableName);
-    }
+    throws Exception {
+    enableReplicationByModification(true, 2, 3, 15);
   }
 }
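The replica-assignment step this patch adds builds one region descriptor per missing replica by crossing each primary region with the newly added replica ids. A minimal, self-contained sketch of that expansion, with a plain record standing in for HBase's RegionInfo and RegionReplicaUtil (both assumptions here, not the real API):

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ReplicaExpansionDemo {
  // Toy stand-in for RegionInfo: a primary region name plus a replica id.
  record Replica(String region, int replicaId) {}

  public static void main(String[] args) {
    int oldReplicaCount = 1;
    int newReplicaCount = 3;
    List<String> primaries = List.of("regionA", "regionB");
    // Cross each primary with the replica ids added by the modification,
    // the same IntStream.range(old, new) expansion used in the patch above.
    List<Replica> newReplicas = primaries.stream()
        .flatMap(primary -> IntStream.range(oldReplicaCount, newReplicaCount)
            .mapToObj(id -> new Replica(primary, id)))
        .collect(Collectors.toList());
    // Prints replicas 1 and 2 for each primary; replica 0 is the primary itself.
    newReplicas.forEach(System.out::println);
  }
}
```

This is also why the test above expects `enableReplicaCount * splitCount` regions in total: every split region contributes one primary plus its extra replicas.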
From 8a995ae81bd551c81a2da58627b803f317241469 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Thu, 8 Oct 2020 09:10:30 -0700
Subject: [PATCH 115/769] HBASE-25165 Change 'State time' in UI so sorts (#2508)

Display startcode in iso8601.
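The reason the ISO-8601 rendering sorts correctly where java.util.Date's default rendering does not: Date.toString() leads with the day-of-week name in the local time zone, while Instant.toString() leads with the most significant time fields. A quick illustration (the two timestamps are arbitrary example values):

```java
import java.time.Instant;
import java.util.Date;

public class StartcodeRenderingDemo {
  public static void main(String[] args) {
    long earlier = 1601424000000L; // 2020-09-30T00:00:00Z, a Wednesday
    long later = 1601856000000L;   // 2020-10-05T00:00:00Z, a Monday
    // Date.toString() starts with the weekday name, so "Wed Sep 30 ..." sorts
    // after "Mon Oct 05 ..." in a plain string sort, despite being earlier.
    System.out.println(new Date(earlier));
    System.out.println(new Date(later));
    // ISO-8601 output compares year, then month, then day, and so on, so the
    // string sort used by the UI table is also a chronological sort.
    System.out.println(Instant.ofEpochMilli(earlier));
    System.out.println(Instant.ofEpochMilli(later));
  }
}
```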
Signed-off-by: Nick Dimiduk
---
 .../apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index a3067eeaf8ad..58783611180e 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -121,7 +121,7 @@ Arrays.sort(serverNames);
     <td><& serverNameLink; serverName=serverName; &></td>
-    <td><% new Date(startcode) %></td>
+    <td><% java.time.Instant.ofEpochMilli(startcode) %></td>
     <td><% TraditionalBinaryPrefix.long2String(lastContact, "s", 1) %></td>
     <td><% version %></td>
     <td><% String.format("%,.0f", requestsPerSecond) %></td>

From bdcafa895ceee2f8a6b62968476eb2392b10f45b Mon Sep 17 00:00:00 2001
From: Mohammad Arshad
Date: Thu, 8 Oct 2020 23:00:16 +0530
Subject: [PATCH 116/769] HBASE-24025: Improve performance of move_servers_rsgroup by using async region move API (#1549)

---
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 153 ++++++++++++------
 .../hbase/rsgroup/TestRSGroupsAdmin2.java     |  35 ++++
 2 files changed, 135 insertions(+), 53 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index e1d9d66e4ec3..16a44ad76ccc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -33,6 +33,7 @@
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
+import java.util.concurrent.Future;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
@@ -78,6 +79,7 @@
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FutureUtils;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -956,84 +958,129 @@ private void addRegion(final LinkedList<RegionInfo> regions, RegionInfo hri) {
   /**
    * Move every region from servers which are currently located on these servers, but should not be
    * located there.
-   * @param servers the servers that will move to new group
-   * @param targetGroupName the target group name
+   * @param movedServers the servers that are moved to new group
+   * @param srcGrpServers all servers in the source group, excluding the movedServers
+   * @param targetGroup the target group
    * @throws IOException if moving the server and tables fail
    */
-  private void moveServerRegionsFromGroup(Set<Address> servers, String targetGroupName)
-    throws IOException {
-    moveRegionsBetweenGroups(servers, targetGroupName, rs -> getRegions(rs), info -> {
-      try {
-        String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable())
-          .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP);
-        return groupName.equals(targetGroupName);
-      } catch (IOException e) {
-        LOG.warn("Failed to test group for region {} and target group {}", info, targetGroupName);
-        return false;
-      }
-    }, rs -> rs.getHostname());
+  private void moveServerRegionsFromGroup(Set<Address> movedServers, Set<Address> srcGrpServers,
+    RSGroupInfo targetGroup) throws IOException {
+    moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroup, rs -> getRegions(rs),
+      info -> {
+        try {
+          String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable())
+            .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP);
+          return groupName.equals(targetGroup.getName());
+        } catch (IOException e) {
+          LOG.warn("Failed to test group for region {} and target group {}", info,
+            targetGroup.getName());
+          return false;
+        }
+      });
   }
 
-  private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, String targetGroupName,
-    Function<T, List<RegionInfo>> getRegionsInfo, Function<RegionInfo, Boolean> validation,
-    Function<T, String> getOwnerName) throws IOException {
-    boolean hasRegionsToMove;
+  private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, Set<Address> newRegionsOwners,
+    RSGroupInfo targetGrp, Function<T, List<RegionInfo>> getRegionsInfo,
+    Function<RegionInfo, Boolean> validation) throws IOException {
+    // Get server names corresponding to given Addresses
+    List<ServerName> movedServerNames = new ArrayList<>(regionsOwners.size());
+    List<ServerName> srcGrpServerNames = new ArrayList<>(newRegionsOwners.size());
+    for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) {
+      // In case region move failed in previous attempt, regionsOwners and newRegionsOwners
+      // can have the same servers. So for all servers below both conditions to be checked
+      if (newRegionsOwners.contains(serverName.getAddress())) {
+        srcGrpServerNames.add(serverName);
+      }
+      if (regionsOwners.contains(serverName.getAddress())) {
+        movedServerNames.add(serverName);
+      }
+    }
+    List<Pair<RegionInfo, Future<byte[]>>> assignmentFutures = new ArrayList<>();
     int retry = 0;
-    Set<T> allOwners = new HashSet<>(regionsOwners);
    Set<String> failedRegions = new HashSet<>();
     IOException toThrow = null;
     do {
-      hasRegionsToMove = false;
-      for (Iterator<T> iter = allOwners.iterator(); iter.hasNext(); ) {
-        T owner = iter.next();
+      assignmentFutures.clear();
+      failedRegions.clear();
+      for (ServerName owner : movedServerNames) {
         // Get regions that are associated with this server and filter regions by group tables.
-        for (RegionInfo region : getRegionsInfo.apply(owner)) {
+        for (RegionInfo region : getRegionsInfo.apply((T) owner.getAddress())) {
           if (!validation.apply(region)) {
             LOG.info("Moving region {}, which do not belong to RSGroup {}",
-              region.getShortNameToLog(), targetGroupName);
+              region.getShortNameToLog(), targetGrp.getName());
+            // Move region back to source RSGroup servers
+            ServerName dest =
+              masterServices.getLoadBalancer().randomAssignment(region, srcGrpServerNames);
+            if (dest == null) {
+              failedRegions.add(region.getRegionNameAsString());
+              continue;
+            }
+            RegionPlan rp = new RegionPlan(region, owner, dest);
             try {
-              this.masterServices.getAssignmentManager().move(region);
-              failedRegions.remove(region.getRegionNameAsString());
+              Future<byte[]> future = masterServices.getAssignmentManager().moveAsync(rp);
+              assignmentFutures.add(Pair.newPair(region, future));
             } catch (IOException ioe) {
+              failedRegions.add(region.getRegionNameAsString());
               LOG.debug("Move region {} from group failed, will retry, current retry time is {}",
-                  region.getShortNameToLog(), retry, ioe);
+                region.getShortNameToLog(), retry, ioe);
               toThrow = ioe;
-              failedRegions.add(region.getRegionNameAsString());
             }
-            if (masterServices.getAssignmentManager().getRegionStates().
-                getRegionState(region).isFailedOpen()) {
-              continue;
-            }
-            hasRegionsToMove = true;
           }
         }
-
-        if (!hasRegionsToMove) {
-          LOG.info("No more regions to move from {} to RSGroup", getOwnerName.apply(owner));
-          iter.remove();
-        }
       }
-
-      retry++;
-      try {
-        wait(1000);
-      } catch (InterruptedException e) {
-        LOG.warn("Sleep interrupted", e);
-        Thread.currentThread().interrupt();
+      waitForRegionMovement(assignmentFutures, failedRegions, targetGrp.getName(), retry);
+      if (failedRegions.isEmpty()) {
+        LOG.info("All regions from server(s) {} moved to target group {}.", movedServerNames,
+          targetGrp.getName());
+        return;
+      } else {
+        try {
+          wait(1000);
+        } catch (InterruptedException e) {
+          LOG.warn("Sleep interrupted", e);
+          Thread.currentThread().interrupt();
+        }
+        retry++;
       }
-    } while (hasRegionsToMove && retry <=
-      masterServices.getConfiguration().getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE));
+    } while (!failedRegions.isEmpty() && retry <= masterServices.getConfiguration()
+      .getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE));
 
     //has up to max retry time or there are no more regions to move
-    if (hasRegionsToMove) {
+    if (!failedRegions.isEmpty()) {
      // print failed moved regions, for later process conveniently
      String msg = String
-        .format("move regions for group %s failed, failed regions: %s", targetGroupName,
-          failedRegions);
+        .format("move regions for group %s failed, failed regions: %s", targetGrp.getName(),
+          failedRegions);
      LOG.error(msg);
      throw new DoNotRetryIOException(
-        msg + ", just record the last failed region's cause, more details in server log",
-        toThrow);
+        msg + ", just record the last failed region's cause, more details in server log", toThrow);
+    }
+  }
+
+  /**
+   * Wait for all the region moves to complete. Keep waiting for other region movement
+   * completion even if some region movement fails.
+   */
+  private void waitForRegionMovement(List<Pair<RegionInfo, Future<byte[]>>> regionMoveFutures,
+    Set<String> failedRegions, String tgtGrpName, int retryCount) {
+    LOG.info("Moving {} region(s) to group {}, current retry={}", regionMoveFutures.size(),
+      tgtGrpName, retryCount);
+    for (Pair<RegionInfo, Future<byte[]>> pair : regionMoveFutures) {
+      try {
+        pair.getSecond().get();
+        if (masterServices.getAssignmentManager().getRegionStates().
+          getRegionState(pair.getFirst()).isFailedOpen()) {
+          failedRegions.add(pair.getFirst().getRegionNameAsString());
+        }
+      } catch (InterruptedException e) {
+        // Don't return from here; let's wait for other regions to complete movement.
+        failedRegions.add(pair.getFirst().getRegionNameAsString());
+        LOG.warn("Sleep interrupted", e);
+      } catch (Exception e) {
+        failedRegions.add(pair.getFirst().getRegionNameAsString());
+        LOG.error("Move region {} to group {} failed, will retry on next attempt",
+          pair.getFirst().getShortNameToLog(), tgtGrpName, e);
+      }
+    }
   }
 
@@ -1185,7 +1232,7 @@ public void moveServers(Set<Address> servers, String targetGroupName) throws IOE
     if (StringUtils.isEmpty(targetGroupName)) {
       throw new ConstraintException("RSGroup cannot be null.");
     }
-    getRSGroupInfo(targetGroupName);
+    RSGroupInfo targetGroup = getRSGroupInfo(targetGroupName);
 
     // Hold a lock on the manager instance while moving servers to prevent
     // another writer changing our state while we are working.
@@ -1230,7 +1277,7 @@ public void moveServers(Set<Address> servers, String targetGroupName) throws IOE
       // MovedServers may be < passed in 'servers'.
       Set<Address> movedServers = moveServers(servers, srcGrp.getName(), targetGroupName);
-      moveServerRegionsFromGroup(movedServers, targetGroupName);
+      moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroup);
       LOG.info("Move servers done: {} => {}", srcGrp.getName(), targetGroupName);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
index a3a08eabe996..983414236c3b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
@@ -686,4 +686,39 @@ public void testFailedMoveServersTablesAndRepair() throws Exception {
       assertEquals(regionsInfo.getTable(), table2);
     }
   }
+
+  @Test
+  public void testMoveServersToRSGroupPerformance() throws Exception {
+    final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 2);
+    final byte[] familyNameBytes = Bytes.toBytes("f");
+    // there will be 100 regions on both the servers
+    final int tableRegionCount = 200;
+    // All the regions created below will be assigned to the default group.
+    TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, tableRegionCount);
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override public boolean evaluate() throws Exception {
+        List<String> regions = getTableRegionMap().get(tableName);
+        if (regions == null) {
+          return false;
+        }
+        return getTableRegionMap().get(tableName).size() >= tableRegionCount;
+      }
+    });
+    ADMIN.setRSGroup(Sets.newHashSet(tableName), newGroup.getName());
+    TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
+    String rsGroup2 = "rsGroup2";
+    ADMIN.addRSGroup(rsGroup2);
+
+    long startTime = System.currentTimeMillis();
+    ADMIN.moveServersToRSGroup(Sets.newHashSet(newGroup.getServers().first()), rsGroup2);
+    long timeTaken = System.currentTimeMillis() - startTime;
+    String msg =
+      "Should not take more than 15000 ms to move a table with 100 regions. Time taken ="
+        + timeTaken + " ms";
+    // This test case is meant to be used for verifying the performance quickly by a developer.
+    // Moving 100 regions takes much less than 15000 ms. Given 15000 ms so the test passes
+    // in all environments.
+    assertTrue(msg, timeTaken < 15000);
+    LOG.info("Time taken to move a table with 100 regions is {} ms", timeTaken);
+  }
 }
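The heart of this patch is the switch from one blocking move() per region to a submit-then-wait pattern: fire off every moveAsync() first, then block on the futures and retry only what failed, so the moves overlap instead of running serially. A simplified sketch of that pattern using a plain ExecutorService; the task payload and failure handling are illustrative assumptions, not HBase API:

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitThenWaitDemo {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    List<String> regions = List.of("r1", "r2", "r3", "r4", "r5");

    // Phase 1: submit every "move" without blocking, keeping one future per
    // region, mirroring the assignmentFutures list in the patch.
    List<Future<String>> futures = new ArrayList<>();
    for (String region : regions) {
      futures.add(pool.submit(() -> {
        Thread.sleep(200); // stand-in for an actual region move
        return region;
      }));
    }

    // Phase 2: wait on all futures, collecting failures instead of aborting,
    // so one slow or failed move does not stall or abandon the rest.
    Set<String> failed = new HashSet<>();
    for (int i = 0; i < futures.size(); i++) {
      try {
        futures.get(i).get();
      } catch (Exception e) {
        failed.add(regions.get(i));
      }
    }
    System.out.println("failed moves to retry: " + failed);
    pool.shutdown();
  }
}
```

With four workers, the five simulated moves finish in roughly two 200 ms rounds instead of five sequential ones, which is the same effect the rsgroup move gains from moveAsync().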
From b82d8a55171b7815662defc5c0d01f5f07baf520 Mon Sep 17 00:00:00 2001
From: Nick Dimiduk
Date: Thu, 8 Oct 2020 14:23:38 -0700
Subject: [PATCH 117/769] HBASE-25156 TestMasterFailover.testSimpleMasterFailover is flaky (#2507)

Change the test to wait for evidence that the active master has seen
that the backup master killed by the test has gone away. This is done
before proceeding to validate that the dead backup is correctly omitted
from the ClusterStatus report.

Also, minor fixup to several assertions, using assertEquals instead of
assertTrue(...equals(...)) and correcting expected vs. actual ordering
of assertion arguments.
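Both fixes are easy to see in miniature: JUnit's assertEquals takes the expected value first, and asserting on asynchronously updated state needs a bounded poll rather than an immediate check. A small sketch; the waitFor helper below is a simplified stand-in for HBaseTestingUtility's, written out so the example is self-contained:

```java
import static org.junit.Assert.assertEquals;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BooleanSupplier;

public class PollingAssertDemo {
  // Poll until the condition holds or the timeout elapses.
  static void waitFor(long timeoutMs, BooleanSupplier condition) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline && !condition.getAsBoolean()) {
      Thread.sleep(100);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    AtomicInteger backupMasters = new AtomicInteger(2);
    // Simulate the active master noticing the killed backup after a delay.
    new Thread(() -> {
      try {
        TimeUnit.SECONDS.sleep(1);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      backupMasters.set(1);
    }).start();
    // Wait for the state change before asserting, instead of asserting immediately.
    waitFor(TimeUnit.SECONDS.toMillis(30), () -> backupMasters.get() == 1);
    // assertEquals reads (expected, actual); reversed arguments produce
    // misleading failure messages such as "expected:<2> but was:<1>".
    assertEquals(1, backupMasters.get());
  }
}
```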
Signed-off-by: Michael Stack
---
 .../hbase/master/TestMasterFailover.java      | 23 ++++++++++++--------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index a27936df07a4..1e37fcb52b58 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,8 +20,8 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -100,7 +100,7 @@ public void testSimpleMasterFailover() throws Exception {
       // Check that ClusterStatus reports the correct active and backup masters
       assertNotNull(active);
       ClusterMetrics status = active.getClusterMetrics();
-      assertTrue(status.getMasterName().equals(activeName));
+      assertEquals(activeName, status.getMasterName());
       assertEquals(2, status.getBackupMasterNames().size());
 
       // attempt to stop one of the inactive masters
@@ -113,7 +113,7 @@ public void testSimpleMasterFailover() throws Exception {
       // Verify still one active master and it's the same
       for (int i = 0; i < masterThreads.size(); i++) {
         if (masterThreads.get(i).getMaster().isActiveMaster()) {
-          assertTrue(activeName.equals(masterThreads.get(i).getMaster().getServerName()));
+          assertEquals(activeName, masterThreads.get(i).getMaster().getServerName());
           activeIndex = i;
           active = masterThreads.get(activeIndex).getMaster();
         }
@@ -126,10 +126,15 @@ public void testSimpleMasterFailover() throws Exception {
         " regions servers");
       assertEquals(3, rsCount);
 
+      // wait for the active master to acknowledge loss of the backup from ZK
+      final HMaster activeFinal = active;
+      TEST_UTIL.waitFor(
+        TimeUnit.SECONDS.toMillis(30), () -> activeFinal.getBackupMasters().size() == 1);
+
       // Check that ClusterStatus reports the correct active and backup masters
       assertNotNull(active);
       status = active.getClusterMetrics();
-      assertTrue(status.getMasterName().equals(activeName));
+      assertEquals(activeName, status.getMasterName());
       assertEquals(1, status.getBackupMasterNames().size());
 
       // kill the active master
@@ -148,13 +153,13 @@ public void testSimpleMasterFailover() throws Exception {
       active = masterThreads.get(0).getMaster();
       assertNotNull(active);
       status = active.getClusterMetrics();
-      ServerName mastername = status.getMasterName();
-      assertTrue(mastername.equals(active.getServerName()));
+      ServerName masterName = status.getMasterName();
+      assertNotNull(masterName);
+      assertEquals(active.getServerName(), masterName);
       assertTrue(active.isActiveMaster());
       assertEquals(0, status.getBackupMasterNames().size());
       int rss = status.getLiveServerMetrics().size();
-      LOG.info("Active master " + mastername.getServerName() + " managing " +
-        rss + " region servers");
+      LOG.info("Active master {} managing {} region servers", masterName.getServerName(), rss);
       assertEquals(3, rss);
     } finally {
       // Stop the cluster
From 78ae1f176d4215dcc34067ed25d786a4fcd4d888 Mon Sep 17 00:00:00 2001
From: Nick Dimiduk
Date: Wed, 23 Sep 2020 16:47:23 -0700
Subject: [PATCH 118/769] HBASE-24628 Region normalizer now respects a rate limit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Implement a rate limiter for the normalizer. Implemented in terms of
MB/sec of affected region size (the same metrics used to make
normalization decisions). Uses Guava `RateLimiter` to perform the
resource accounting. `RateLimiter` works by blocking (uninterruptible
😖) the calling thread. Thus, the whole construction of the normalizer
subsystem needed refactoring. See the provided `package-info.java` for
an overview of this new structure.

Introduces a new configuration, `hbase.normalizer.throughput.max_bytes_per_sec`,
for specifying a limit on the throughput of actions executed by the
normalizer. Note that while this configuration value is in bytes, the
minimum honored value is `1_000_000`. Supports values configured using
the human-readable suffixes honored by `Configuration.getLongBytes`

Signed-off-by: Viraj Jasani
Signed-off-by: Huaxiang Sun
Signed-off-by: Michael Stack
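A minimal sketch of the throttling idea described above, assuming, purely for illustration, that one Guava RateLimiter permit stands for one megabyte of affected region size (the real configuration is expressed in bytes, and Guava must be on the classpath):

```java
import com.google.common.util.concurrent.RateLimiter;

public class NormalizerThroughputDemo {
  public static void main(String[] args) {
    // Illustrative budget: 5 permits/sec, i.e. 5 MB/sec under this assumption.
    RateLimiter limiter = RateLimiter.create(5.0);
    long[] planSizesMb = { 10, 3, 25 };
    for (long sizeMb : planSizesMb) {
      // acquire() blocks the calling thread (uninterruptibly) until enough
      // permits accumulate, which is why the subsystem runs this accounting
      // on a dedicated worker thread rather than a shared chore thread.
      double waitedSeconds = limiter.acquire((int) sizeMb);
      System.out.printf("ran a %d MB plan after waiting %.2f s%n", sizeMb, waitedSeconds);
    }
  }
}
```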
---
 .../apache/hadoop/hbase/master/HMaster.java   | 180 +++----
 .../hbase/master/MasterRpcServices.java       |  30 ++-
 .../hadoop/hbase/master/MasterServices.java   |  27 +-
 .../master/MetricsMasterWrapperImpl.java      |   4 +-
 .../MergeTableRegionsProcedure.java           |   8 +-
 .../assignment/SplitTableRegionProcedure.java |  17 +-
 .../normalizer/MergeNormalizationPlan.java    |  72 ++---
 .../master/normalizer/NormalizationPlan.java  |  18 +-
 .../normalizer/NormalizationTarget.java       |  80 ++++++
 .../master/normalizer/RegionNormalizer.java   |  24 +-
 .../normalizer/RegionNormalizerChore.java     |  24 +-
 .../normalizer/RegionNormalizerFactory.java   |  30 ++-
 .../normalizer/RegionNormalizerManager.java   | 174 ++++++++++++
 .../normalizer/RegionNormalizerWorkQueue.java | 244 +++++++++++++++++
 .../normalizer/RegionNormalizerWorker.java    | 253 ++++++++++++++++++
 .../normalizer/SimpleRegionNormalizer.java    |  49 +---
 .../normalizer/SplitNormalizationPlan.java    |  29 +-
 .../hbase/master/normalizer/package-info.java | 100 +++++++
 .../hbase/master/MockNoopMasterServices.java  |  23 +-
 .../master/TestMasterChoreScheduled.java      |  35 +--
 .../master/TestMasterMetricsWrapper.java      |   6 +-
 .../TestRegionNormalizerWorkQueue.java        | 234 ++++++++++++++++
 .../TestRegionNormalizerWorker.java           | 252 +++++++++++++++++
 .../TestSimpleRegionNormalizer.java           |  85 ++++--
 .../TestSimpleRegionNormalizerOnCluster.java  |   7 +-
 25 files changed, 1634 insertions(+), 371 deletions(-)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index cf43c8b814c4..9c617bbe7f8e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -29,7 +29,6 @@
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
-import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -49,7 +48,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import javax.servlet.ServletException;
@@ -117,11 +115,8 @@
 import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore;
 import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
 import org.apache.hadoop.hbase.master.locking.LockManager;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
+import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
@@ -202,7 +197,6 @@
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -233,7 +227,6 @@
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors;
 import org.apache.hbase.thirdparty.com.google.protobuf.Service;
-import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
@@ -337,9 +330,6 @@ public void run() {
   // Tracker for split and merge state
   private SplitOrMergeTracker splitOrMergeTracker;
 
-  // Tracker for region normalizer state
-  private RegionNormalizerTracker regionNormalizerTracker;
-
   private ClusterSchemaService clusterSchemaService;
 
   public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =
@@ -406,11 +396,8 @@ public void run() {
   private final LockManager lockManager = new LockManager(this);
 
   private RSGroupBasedLoadBalancer balancer;
-  // a lock to prevent concurrent normalization actions.
-  private final ReentrantLock normalizationInProgressLock = new ReentrantLock();
-  private RegionNormalizer normalizer;
   private BalancerChore balancerChore;
-  private RegionNormalizerChore normalizerChore;
+  private RegionNormalizerManager regionNormalizerManager;
   private ClusterStatusChore clusterStatusChore;
   private ClusterStatusPublisher clusterStatusPublisherChore = null;
   private SnapshotCleanerChore snapshotCleanerChore = null;
@@ -464,9 +451,6 @@ public void run() {
   // handle table states
   private TableStateManager tableStateManager;
 
-  private long splitPlanCount;
-  private long mergePlanCount;
-
   /** jetty server for master to redirect requests to regionserver infoServer */
   private Server masterJettyServer;
 
@@ -788,26 +772,19 @@ public MetricsMaster getMasterMetrics() {
   }
 
   /**
-   * <p>
   * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it
   * should have already been initialized along with {@link ServerManager}.
-   * </p>
-   * <p>
-   * Will be overridden in tests.
-   * </p>
    */
-  @VisibleForTesting
-  protected void initializeZKBasedSystemTrackers()
-      throws IOException, InterruptedException, KeeperException, ReplicationException {
+  private void initializeZKBasedSystemTrackers()
+      throws IOException, KeeperException, ReplicationException {
     this.balancer = new RSGroupBasedLoadBalancer();
     this.balancer.setConf(conf);
     this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
     this.loadBalancerTracker.start();
 
-    this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
-    this.normalizer.setMasterServices(this);
-    this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);
-    this.regionNormalizerTracker.start();
+    this.regionNormalizerManager =
+      RegionNormalizerFactory.createNormalizerManager(conf, zooKeeper, this);
+    this.regionNormalizerManager.start();
 
     this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);
     this.splitOrMergeTracker.start();
@@ -900,10 +877,10 @@ protected AssignmentManager createAssignmentManager(MasterServices master) {
   *
   * <ol>
   * <li>If this is a new deploy, schedule a InitMetaProcedure to initialize meta</li>
-   * <li>Start necessary service threads - balancer, catalog janior, executor services, and also the
-   * procedure executor, etc. Notice that the balancer must be created first as assignment manager
-   * may use it when assigning regions.</li>
-   * <li>Wait for meta to be initialized if necesssary, start table state manager.</li>
+   * <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
+   * the procedure executor, etc. Notice that the balancer must be created first as assignment
+   * manager may use it when assigning regions.</li>
+   * <li>Wait for meta to be initialized if necessary, start table state manager.</li>
   * <li>Wait for enough region servers to check-in</li>
   * <li>Let assignment manager load data from meta and construct region states</li>
   * <li>Start all other things such as chore services, etc</li>
  • @@ -1116,8 +1093,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc getChoreService().scheduleChore(clusterStatusChore); this.balancerChore = new BalancerChore(this); getChoreService().scheduleChore(balancerChore); - this.normalizerChore = new RegionNormalizerChore(this); - getChoreService().scheduleChore(normalizerChore); + getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore()); this.catalogJanitorChore = new CatalogJanitor(this); getChoreService().scheduleChore(catalogJanitorChore); this.hbckChore = new HbckChore(this); @@ -1533,6 +1509,9 @@ protected void stopServiceThreads() { // example. stopProcedureExecutor(); + if (regionNormalizerManager != null) { + regionNormalizerManager.stop(); + } if (this.quotaManager != null) { this.quotaManager.stop(); } @@ -1651,7 +1630,7 @@ private void stopChores() { choreService.cancelChore(this.mobFileCleanerChore); choreService.cancelChore(this.mobFileCompactionChore); choreService.cancelChore(this.balancerChore); - choreService.cancelChore(this.normalizerChore); + choreService.cancelChore(getRegionNormalizerManager().getRegionNormalizerChore()); choreService.cancelChore(this.clusterStatusChore); choreService.cancelChore(this.catalogJanitorChore); choreService.cancelChore(this.clusterStatusPublisherChore); @@ -1751,7 +1730,9 @@ public boolean balance() throws IOException { * @param action the name of the action under consideration, for logging. * @return {@code true} when the caller should exit early, {@code false} otherwise. */ - private boolean skipRegionManagementAction(final String action) { + @Override + public boolean skipRegionManagementAction(final String action) { + // Note: this method could be `default` on MasterServices if but for logging. if (!isInitialized()) { LOG.debug("Master has not been initialized, don't run {}.", action); return true; @@ -1896,24 +1877,16 @@ public List executeRegionPlansWithThrottling(List plans) } @Override - public RegionNormalizer getRegionNormalizer() { - return this.normalizer; + public RegionNormalizerManager getRegionNormalizerManager() { + return regionNormalizerManager; } - public boolean normalizeRegions() throws IOException { - return normalizeRegions(new NormalizeTableFilterParams.Builder().build()); - } - - /** - * Perform normalization of cluster. - * - * @return true if an existing normalization was already in progress, or if a new normalization - * was performed successfully; false otherwise (specifically, if HMaster finished initializing - * or normalization is globally disabled). - */ - public boolean normalizeRegions(final NormalizeTableFilterParams ntfp) throws IOException { - final long startTime = EnvironmentEdgeManager.currentTime(); - if (regionNormalizerTracker == null || !regionNormalizerTracker.isNormalizerOn()) { + @Override + public boolean normalizeRegions( + final NormalizeTableFilterParams ntfp, + final boolean isHighPriority + ) throws IOException { + if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) { LOG.debug("Region normalization is disabled, don't run region normalizer."); return false; } @@ -1924,70 +1897,17 @@ public boolean normalizeRegions(final NormalizeTableFilterParams ntfp) throws IO return false; } - if (!normalizationInProgressLock.tryLock()) { - // Don't run the normalizer concurrently - LOG.info("Normalization already in progress. 
Skipping request."); - return true; - } - - int affectedTables = 0; - try { - final Set matchingTables = getTableDescriptors(new LinkedList<>(), - ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) - .stream() - .map(TableDescriptor::getTableName) - .collect(Collectors.toSet()); - final Set allEnabledTables = - tableStateManager.getTablesInStates(TableState.State.ENABLED); - final List targetTables = - new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); - Collections.shuffle(targetTables); - - final List submittedPlanProcIds = new ArrayList<>(); - for (TableName table : targetTables) { - if (table.isSystemTable()) { - continue; - } - final TableDescriptor tblDesc = getTableDescriptors().get(table); - if (tblDesc != null && !tblDesc.isNormalizationEnabled()) { - LOG.debug( - "Skipping table {} because normalization is disabled in its table properties.", table); - continue; - } - - // make one last check that the cluster isn't shutting down before proceeding. - if (skipRegionManagementAction("region normalizer")) { - return false; - } - - final List plans = normalizer.computePlansForTable(table); - if (CollectionUtils.isEmpty(plans)) { - LOG.debug("No normalization required for table {}.", table); - continue; - } - - affectedTables++; - // as of this writing, `plan.submit()` is non-blocking and uses Async Admin APIs to - // submit task , so there's no artificial rate- - // limiting of merge/split requests due to this serial loop. - for (NormalizationPlan plan : plans) { - long procId = plan.submit(this); - submittedPlanProcIds.add(procId); - if (plan.getType() == PlanType.SPLIT) { - splitPlanCount++; - } else if (plan.getType() == PlanType.MERGE) { - mergePlanCount++; - } - } - } - final long endTime = EnvironmentEdgeManager.currentTime(); - LOG.info("Normalizer ran successfully in {}. 
Submitted {} plans, affecting {} tables.", - Duration.ofMillis(endTime - startTime), submittedPlanProcIds.size(), affectedTables); - LOG.debug("Normalizer submitted procID list: {}", submittedPlanProcIds); - } finally { - normalizationInProgressLock.unlock(); - } - return true; + final Set matchingTables = getTableDescriptors(new LinkedList<>(), + ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) + .stream() + .map(TableDescriptor::getTableName) + .collect(Collectors.toSet()); + final Set allEnabledTables = + tableStateManager.getTablesInStates(TableState.State.ENABLED); + final List targetTables = + new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); + Collections.shuffle(targetTables); + return regionNormalizerManager.normalizeRegions(targetTables, isHighPriority); } /** @@ -3003,20 +2923,6 @@ public double getAverageLoad() { return regionStates.getAverageLoad(); } - /* - * @return the count of region split plans executed - */ - public long getSplitPlanCount() { - return splitPlanCount; - } - - /* - * @return the count of region merge plans executed - */ - public long getMergePlanCount() { - return mergePlanCount; - } - @Override public boolean registerService(Service instance) { /* @@ -3511,8 +3417,7 @@ public boolean isBalancerOn() { */ public boolean isNormalizerOn() { return !isInMaintenanceMode() - && regionNormalizerTracker != null - && regionNormalizerTracker.isNormalizerOn(); + && getRegionNormalizerManager().isNormalizerOn(); } /** @@ -3540,13 +3445,6 @@ public String getLoadBalancerClassName() { LoadBalancerFactory.getDefaultLoadBalancerClass().getName()); } - /** - * @return RegionNormalizerTracker instance - */ - public RegionNormalizerTracker getRegionNormalizerTracker() { - return regionNormalizerTracker; - } - public SplitOrMergeTracker getSplitOrMergeTracker() { return splitOrMergeTracker; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 37fc58985e7b..d4dbc8d55dcd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1913,9 +1913,7 @@ public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController contr master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType); } } - } catch (IOException e) { - throw new ServiceException(e); - } catch (KeeperException e) { + } catch (IOException | KeeperException e) { throw new ServiceException(e); } return response.build(); @@ -1940,7 +1938,8 @@ public NormalizeResponse normalize(RpcController controller, .namespace(request.hasNamespace() ? request.getNamespace() : null) .build(); return NormalizeResponse.newBuilder() - .setNormalizerRan(master.normalizeRegions(ntfp)) + // all API requests are considered priority requests. + .setNormalizerRan(master.normalizeRegions(ntfp, true)) .build(); } catch (IOException ex) { throw new ServiceException(ex); @@ -1953,20 +1952,27 @@ public SetNormalizerRunningResponse setNormalizerRunning(RpcController controlle rpcPreCheck("setNormalizerRunning"); // Sets normalizer on/off flag in ZK. 
- boolean prevValue = master.getRegionNormalizerTracker().isNormalizerOn(); - boolean newValue = request.getOn(); - try { - master.getRegionNormalizerTracker().setNormalizerOn(newValue); - } catch (KeeperException ke) { - LOG.warn("Error flipping normalizer switch", ke); - } + // TODO: this method is totally broken in terms of atomicity of actions and values read. + // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method + // that lets us retrieve the previous value as part of setting a new value, so we simply + // perform a read before issuing the update. Thus we have a data race opportunity, between + // when the `prevValue` is read and whatever is actually overwritten. + // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can + // itself fail in the event that the znode already exists. Thus, another data race, between + // when the initial `setData` call is notified of the absence of the target znode and the + // subsequent `createAndWatch`, with another client creating said node. + // That said, there's supposed to be only one active master and thus there's supposed to be + // only one process with the authority to modify the value. + final boolean prevValue = master.getRegionNormalizerManager().isNormalizerOn(); + final boolean newValue = request.getOn(); + master.getRegionNormalizerManager().setNormalizerOn(newValue); LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue); return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build(); } @Override public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, - IsNormalizerEnabledRequest request) throws ServiceException { + IsNormalizerEnabledRequest request) { IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder(); response.setEnabled(master.isNormalizerOn()); return response.build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 908d21270c6e..384785d738f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -34,7 +35,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; +import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; @@ -54,7 +55,6 @@ import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; import org.apache.yetus.audience.InterfaceAudience; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.protobuf.Service; @@ -122,9 +122,9 @@ public interface MasterServices extends Server { MasterQuotaManager getMasterQuotaManager(); /** - * @return Master's instance of {@link RegionNormalizer} + * @return Master's instance of {@link RegionNormalizerManager} */ - RegionNormalizer getRegionNormalizer(); + RegionNormalizerManager getRegionNormalizerManager(); /** * @return Master's instance of {@link CatalogJanitor} @@ -354,6 +354,13 @@ long splitRegion( */ boolean isInMaintenanceMode(); + /** + * Checks master state before initiating action over region topology. + * @param action the name of the action under consideration, for logging. + * @return {@code true} when the caller should exit early, {@code false} otherwise. + */ + boolean skipRegionManagementAction(final String action); + /** * Abort a procedure. * @param procId ID of the procedure @@ -553,4 +560,14 @@ default SplitWALManager getSplitWALManager(){ * @return The state of the load balancer, or false if the load balancer isn't defined. */ boolean isBalancerOn(); + + /** + * Perform normalization of cluster. + * @param ntfp Selection criteria for identifying which tables to normalize. + * @param isHighPriority {@code true} when these requested tables should skip to the front of + * the queue. + * @return {@code true} when the request was submitted, {@code false} otherwise. 
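+   * <p>
+   * An illustrative sketch of the two call sites (the {@code ntfp} is whatever filter the
+   * caller assembled; see {@code MasterRpcServices#normalize}):
+   * <pre>
+   *   // periodic chore pass: tables are appended to the tail of the work queue
+   *   master.normalizeRegions(new NormalizeTableFilterParams.Builder().build(), false);
+   *   // operator-initiated RPC: tables jump to the front of the work queue
+   *   master.normalizeRegions(ntfp, true);
+   * </pre>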
+ */ + boolean normalizeRegions( + final NormalizeTableFilterParams ntfp, final boolean isHighPriority) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java index 9d4550c5eb0a..aeaae929209e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java @@ -55,12 +55,12 @@ public double getAverageLoad() { @Override public long getSplitPlanCount() { - return master.getSplitPlanCount(); + return master.getRegionNormalizerManager().getSplitPlanCount(); } @Override public long getMergePlanCount() { - return master.getMergePlanCount(); + return master.getRegionNormalizerManager().getMergePlanCount(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index f1b3329b25c0..5e06a44912b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -59,9 +59,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -534,8 +532,10 @@ private void preMergeRegions(final MasterProcedureEnv env) throws IOException { try { env.getMasterServices().getMasterQuotaManager().onRegionMerged(this.mergedRegion); } catch (QuotaExceededException e) { - env.getMasterServices().getRegionNormalizer().planSkipped(this.mergedRegion, - NormalizationPlan.PlanType.MERGE); + // TODO: why is this here? merge requests can be submitted by actors other than the normalizer + env.getMasterServices() + .getRegionNormalizerManager() + .planSkipped(NormalizationPlan.PlanType.MERGE); throw e; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index d0413360e6df..0eb7667d7da3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -71,13 +71,11 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -181,9 +179,10 @@ private boolean hasBestSplitRow() { private void checkSplittable(final MasterProcedureEnv env, final RegionInfo regionToSplit, final byte[] splitRow) throws IOException { // Ask the remote RS if this region is splittable. - // If we get an IOE, report it along w/ the failure so can see why we are not splittable at this time. + // If we get an IOE, report it along w/ the failure so can see why we are not splittable at + // this time. if(regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - throw new IllegalArgumentException ("Can't invoke split on non-default regions directly"); + throw new IllegalArgumentException("Can't invoke split on non-default regions directly"); } RegionStateNode node = env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion()); @@ -570,8 +569,10 @@ private void preSplitRegion(final MasterProcedureEnv env) try { env.getMasterServices().getMasterQuotaManager().onRegionSplit(this.getParentRegion()); } catch (QuotaExceededException e) { - env.getMasterServices().getRegionNormalizer().planSkipped(this.getParentRegion(), - NormalizationPlan.PlanType.SPLIT); + // TODO: why is this here? split requests can be submitted by actors other than the normalizer + env.getMasterServices() + .getRegionNormalizerManager() + .planSkipped(NormalizationPlan.PlanType.SPLIT); throw e; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java index 17e313047d72..677b9ec8052e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java @@ -18,41 +18,35 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import java.io.IOException; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Normalization plan to merge regions (smallest region in the table with its smallest neighbor). + * Normalization plan to merge adjacent regions. 
As with any call to
+ * {@link MasterServices#mergeRegions(RegionInfo[], boolean, long, long)}
+ * with {@code forcible=false}, region order and adjacency are important. It's the caller's
+ * responsibility to ensure the provided parameters are ordered according to the
+ * {@code mergeRegions} method requirements.
 */
 @InterfaceAudience.Private
-public class MergeNormalizationPlan implements NormalizationPlan {
+final class MergeNormalizationPlan implements NormalizationPlan {
 
-  private final RegionInfo firstRegion;
-  private final RegionInfo secondRegion;
+  private final List<NormalizationTarget> normalizationTargets;
 
-  public MergeNormalizationPlan(RegionInfo firstRegion, RegionInfo secondRegion) {
-    this.firstRegion = firstRegion;
-    this.secondRegion = secondRegion;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public long submit(MasterServices masterServices) throws IOException {
-    // Do not use force=true as corner cases can happen, non adjacent regions,
-    // merge with a merged child region with no GC done yet, it is going to
-    // cause all different issues.
-    return masterServices
-      .mergeRegions(new RegionInfo[] { firstRegion, secondRegion }, false, HConstants.NO_NONCE,
-        HConstants.NO_NONCE);
+  private MergeNormalizationPlan(List<NormalizationTarget> normalizationTargets) {
+    Preconditions.checkNotNull(normalizationTargets);
+    Preconditions.checkState(normalizationTargets.size() >= 2,
+      "normalizationTargets.size() must be >= 2 but was %s", normalizationTargets.size());
+    this.normalizationTargets = Collections.unmodifiableList(normalizationTargets);
   }
 
   @Override
@@ -60,19 +54,14 @@ public PlanType getType() {
     return PlanType.MERGE;
   }
 
-  RegionInfo getFirstRegion() {
-    return firstRegion;
-  }
-
-  RegionInfo getSecondRegion() {
-    return secondRegion;
+  public List<NormalizationTarget> getNormalizationTargets() {
+    return normalizationTargets;
   }
 
   @Override
   public String toString() {
     return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE)
-      .append("firstRegion", firstRegion)
-      .append("secondRegion", secondRegion)
+      .append("normalizationTargets", normalizationTargets)
       .toString();
   }
 
@@ -89,16 +78,31 @@ public boolean equals(Object o) {
     MergeNormalizationPlan that = (MergeNormalizationPlan) o;
 
     return new EqualsBuilder()
-      .append(firstRegion, that.firstRegion)
-      .append(secondRegion, that.secondRegion)
+      .append(normalizationTargets, that.normalizationTargets)
      .isEquals();
   }
 
   @Override
   public int hashCode() {
     return new HashCodeBuilder(17, 37)
-      .append(firstRegion)
-      .append(secondRegion)
+      .append(normalizationTargets)
       .toHashCode();
   }
+
+  /**
+   * A helper for constructing instances of {@link MergeNormalizationPlan}.
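+   * <p>
+   * A sketch of intended usage, mirroring the use in {@code SimpleRegionNormalizer} below
+   * ({@code r0} and {@code r1} stand in for adjacent {@link RegionInfo} instances, sizes in MB):
+   * <pre>
+   *   MergeNormalizationPlan plan = new MergeNormalizationPlan.Builder()
+   *     .addTarget(r0, 5)
+   *     .addTarget(r1, 4)
+   *     .build();
+   * </pre>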
+ */ + static class Builder { + + private final List normalizationTargets = new LinkedList<>(); + + public Builder addTarget(final RegionInfo regionInfo, final long regionSizeMb) { + normalizationTargets.add(new NormalizationTarget(regionInfo, regionSizeMb)); + return this; + } + + public MergeNormalizationPlan build() { + return new MergeNormalizationPlan(normalizationTargets); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java index cd13f69e764e..3bfae14e0b7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +17,12 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; /** - * Interface for normalization plan. + * A {@link NormalizationPlan} describes some modification to region split points as identified + * by an instance of {@link RegionNormalizer}. It is a POJO describing what action needs taken + * and the regions it targets. */ @InterfaceAudience.Private public interface NormalizationPlan { @@ -33,15 +32,6 @@ enum PlanType { NONE } - /** - * Submits normalization plan on cluster (does actual splitting/merging work) and - * returns proc Id to caller. - * @param masterServices instance of {@link MasterServices} - * @return Proc Id for the submitted task - * @throws IOException If plan submission to Admin fails - */ - long submit(MasterServices masterServices) throws IOException; - /** * @return the type of this plan */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java new file mode 100644 index 000000000000..9e4b3f426403 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.master.normalizer;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringStyle;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A POJO that carries details about a region selected for normalization through the pipeline.
+ */
+@InterfaceAudience.Private
+class NormalizationTarget {
+  private final RegionInfo regionInfo;
+  private final long regionSizeMb;
+
+  NormalizationTarget(final RegionInfo regionInfo, final long regionSizeMb) {
+    this.regionInfo = regionInfo;
+    this.regionSizeMb = regionSizeMb;
+  }
+
+  public RegionInfo getRegionInfo() {
+    return regionInfo;
+  }
+
+  public long getRegionSizeMb() {
+    return regionSizeMb;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    NormalizationTarget that = (NormalizationTarget) o;
+
+    return new EqualsBuilder()
+      .append(regionSizeMb, that.regionSizeMb)
+      .append(regionInfo, that.regionInfo)
+      .isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 37)
+      .append(regionInfo)
+      .append(regionSizeMb)
+      .toHashCode();
+  }
+
+  @Override public String toString() {
+    return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE)
+      .append("regionInfo", regionInfo)
+      .append("regionSizeMb", regionSizeMb)
+      .toString();
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java
index 672171d1caff..6f939daeda92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java
@@ -20,13 +20,9 @@
 import java.util.List;
 import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 
 /**
  * Performs "normalization" of regions of a table, making sure that suboptimal
@@ -39,8 +35,7 @@
  * "split/merge storms".
  */
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface RegionNormalizer extends Configurable {
+interface RegionNormalizer extends Configurable {
   /**
    * Set the master service. Must be called before first call to
    * {@link #computePlansForTable(TableName)}.
@@ -55,20 +50,5 @@ public interface RegionNormalizer extends Configurable {
    * @return A list of the normalization actions to perform, or an empty list
    *         if there's nothing to do.
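   * <p>
   * An illustrative sketch of how the result is consumed (condensed from
   * {@code RegionNormalizerWorker#submitPlans} below):
   * <pre>
   *   for (NormalizationPlan plan : normalizer.computePlansForTable(tableName)) {
   *     switch (plan.getType()) {
   *       case MERGE: submitMergePlan((MergeNormalizationPlan) plan); break;
   *       case SPLIT: submitSplitPlan((SplitNormalizationPlan) plan); break;
   *       default: break; // NONE and unrecognized types are skipped
   *     }
   *   }
   * </pre>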
*/ - List computePlansForTable(TableName table) - throws HBaseIOException; - - /** - * Notification for the case where plan couldn't be executed due to constraint violation, such as - * namespace quota - * @param hri the region which is involved in the plan - * @param type type of plan - */ - void planSkipped(RegionInfo hri, PlanType type); - - /** - * @param type type of plan for which skipped count is to be returned - * @return the count of plans of specified type which were skipped - */ - long getSkippedCount(NormalizationPlan.PlanType type); + List computePlansForTable(TableName table); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java index 19d2dc7a3ba9..d56acc2a935e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,34 +17,35 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import java.io.IOException; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.master.HMaster; - -import java.io.IOException; /** - * Chore that will call {@link org.apache.hadoop.hbase.master.HMaster#normalizeRegions()} - * when needed. + * Chore that will periodically call + * {@link HMaster#normalizeRegions(NormalizeTableFilterParams, boolean)}. */ @InterfaceAudience.Private -public class RegionNormalizerChore extends ScheduledChore { +class RegionNormalizerChore extends ScheduledChore { private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerChore.class); - private final HMaster master; + private final MasterServices master; - public RegionNormalizerChore(HMaster master) { + public RegionNormalizerChore(MasterServices master) { super(master.getServerName() + "-RegionNormalizerChore", master, - master.getConfiguration().getInt("hbase.normalizer.period", 300000)); + master.getConfiguration().getInt("hbase.normalizer.period", 300_000)); this.master = master; } @Override protected void chore() { try { - master.normalizeRegions(); + master.normalizeRegions(new NormalizeTableFilterParams.Builder().build(), false); } catch (IOException e) { LOG.error("Failed to normalize regions.", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java index 06774c97a81e..92d16648fcd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Factory to create instance of {@link RegionNormalizer} as configured. @@ -32,13 +35,30 @@ public final class RegionNormalizerFactory { private RegionNormalizerFactory() { } + public static RegionNormalizerManager createNormalizerManager( + final Configuration conf, + final ZKWatcher zkWatcher, + final HMaster master // TODO: consolidate this down to MasterServices + ) { + final RegionNormalizer regionNormalizer = getRegionNormalizer(conf); + regionNormalizer.setMasterServices(master); + final RegionNormalizerTracker tracker = new RegionNormalizerTracker(zkWatcher, master); + final RegionNormalizerChore chore = + master.isInMaintenanceMode() ? null : new RegionNormalizerChore(master); + final RegionNormalizerWorkQueue workQueue = + master.isInMaintenanceMode() ? null : new RegionNormalizerWorkQueue<>(); + final RegionNormalizerWorker worker = master.isInMaintenanceMode() + ? null + : new RegionNormalizerWorker(conf, master, regionNormalizer, workQueue); + return new RegionNormalizerManager(tracker, chore, workQueue, worker); + } + /** * Create a region normalizer from the given conf. * @param conf configuration * @return {@link RegionNormalizer} implementation */ - public static RegionNormalizer getRegionNormalizer(Configuration conf) { - + private static RegionNormalizer getRegionNormalizer(Configuration conf) { // Create instance of Region Normalizer Class balancerKlass = conf.getClass(HConstants.HBASE_MASTER_NORMALIZER_CLASS, SimpleRegionNormalizer.class, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java new file mode 100644 index 000000000000..e818519d6513 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.normalizer; + +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * This class encapsulates the details of the {@link RegionNormalizer} subsystem. + */ +@InterfaceAudience.Private +public class RegionNormalizerManager { + private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerManager.class); + + private final RegionNormalizerTracker regionNormalizerTracker; + private final RegionNormalizerChore regionNormalizerChore; + private final RegionNormalizerWorkQueue workQueue; + private final RegionNormalizerWorker worker; + private final ExecutorService pool; + + private final Object startStopLock = new Object(); + private boolean started = false; + private boolean stopped = false; + + public RegionNormalizerManager( + @NonNull final RegionNormalizerTracker regionNormalizerTracker, + @Nullable final RegionNormalizerChore regionNormalizerChore, + @Nullable final RegionNormalizerWorkQueue workQueue, + @Nullable final RegionNormalizerWorker worker + ) { + this.regionNormalizerTracker = regionNormalizerTracker; + this.regionNormalizerChore = regionNormalizerChore; + this.workQueue = workQueue; + this.worker = worker; + this.pool = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("normalizer-worker-%d") + .setUncaughtExceptionHandler( + (thread, throwable) -> + LOG.error("Uncaught exception, worker thread likely terminated.", throwable)) + .build()); + } + + public void start() { + synchronized (startStopLock) { + if (started) { + return; + } + regionNormalizerTracker.start(); + if (worker != null) { + // worker will be null when master is in maintenance mode. + pool.submit(worker); + } + started = true; + } + } + + public void stop() { + synchronized (startStopLock) { + if (!started) { + throw new IllegalStateException("calling `stop` without first calling `start`."); + } + if (stopped) { + return; + } + pool.shutdownNow(); // shutdownNow to interrupt the worker thread sitting on `take()` + regionNormalizerTracker.stop(); + stopped = true; + } + } + + public ScheduledChore getRegionNormalizerChore() { + return regionNormalizerChore; + } + + /** + * Return {@code true} if region normalizer is on, {@code false} otherwise + */ + public boolean isNormalizerOn() { + return regionNormalizerTracker.isNormalizerOn(); + } + + /** + * Set region normalizer on/off + * @param normalizerOn whether normalizer should be on or off + */ + public void setNormalizerOn(boolean normalizerOn) { + try { + regionNormalizerTracker.setNormalizerOn(normalizerOn); + } catch (KeeperException e) { + LOG.warn("Error flipping normalizer switch", e); + } + } + + /** + * Call-back for the case where plan couldn't be executed due to constraint violation, + * such as namespace quota. + * @param type type of plan that was skipped. + */ + public void planSkipped(NormalizationPlan.PlanType type) { + // TODO: this appears to be used only for testing. 
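+    // Illustrative note: in maintenance mode the factory supplies no worker (see
+    // RegionNormalizerFactory#createNormalizerManager above), so this delegation, like the
+    // counters below, quietly degrades to a no-op.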
+ if (worker != null) { + worker.planSkipped(type); + } + } + + /** + * Retrieve a count of the number of times plans of type {@code type} were submitted but skipped. + * @param type type of plan for which skipped count is to be returned + */ + public long getSkippedCount(NormalizationPlan.PlanType type) { + // TODO: this appears to be used only for testing. + return worker == null ? 0 : worker.getSkippedCount(type); + } + + /** + * Return the number of times a {@link SplitNormalizationPlan} has been submitted. + */ + public long getSplitPlanCount() { + return worker == null ? 0 : worker.getSplitPlanCount(); + } + + /** + * Return the number of times a {@link MergeNormalizationPlan} has been submitted. + */ + public long getMergePlanCount() { + return worker == null ? 0 : worker.getMergePlanCount(); + } + + /** + * Submit tables for normalization. + * @param tables a list of tables to submit. + * @param isHighPriority {@code true} when these requested tables should skip to the front of + * the queue. + * @return {@code true} when work was queued, {@code false} otherwise. + */ + public boolean normalizeRegions(List tables, boolean isHighPriority) { + if (workQueue == null) { + return false; + } + if (isHighPriority) { + workQueue.putAllFirst(tables); + } else { + workQueue.putAll(tables); + } + return true; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java new file mode 100644 index 000000000000..5ebb4f9ad08d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A specialized collection that holds pending work for the {@link RegionNormalizerWorker}. It is + * an ordered collection class that has the following properties: + *
      + *
+ * <ol>
+ *   <li>Guarantees uniqueness of elements, as a {@link Set}.</li>
+ *   <li>Consumers retrieve objects from the head, as a {@link Queue}, via {@link #take()}.</li>
+ *   <li>Work is retrieved on a FIFO policy.</li>
+ *   <li>Work retrieval blocks the calling thread until new work is available, as a
+ *     {@link BlockingQueue}.</li>
+ *   <li>Allows a producer to insert an item at the head of the queue, if desired.</li>
+ * </ol>
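+ * <p>
+ * An illustrative sketch of the resulting semantics ({@code t1}..{@code t3} stand in for
+ * hypothetical elements):
+ * <pre>
+ *   RegionNormalizerWorkQueue<TableName> queue = new RegionNormalizerWorkQueue<>();
+ *   queue.put(t1);
+ *   queue.put(t2);
+ *   queue.put(t1);       // no-op: t1 is already pending
+ *   queue.putFirst(t3);  // t3 jumps ahead of t1 and t2
+ *   queue.take();        // returns t3; take() on an empty queue blocks
+ * </pre>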
    + * Assumes low-frequency and low-parallelism concurrent access, so protects state using a + * simplistic synchronization strategy. + */ +@InterfaceAudience.Private +class RegionNormalizerWorkQueue { + + /** Underlying storage structure that gives us the Set behavior and FIFO retrieval policy. */ + private LinkedHashSet delegate; + + // the locking structure used here follows the example found in LinkedBlockingQueue. The + // difference is that our locks guard access to `delegate` rather than the head node. + + /** Lock held by take, poll, etc */ + private final ReentrantLock takeLock; + + /** Wait queue for waiting takes */ + private final Condition notEmpty; + + /** Lock held by put, offer, etc */ + private final ReentrantLock putLock; + + RegionNormalizerWorkQueue() { + delegate = new LinkedHashSet<>(); + takeLock = new ReentrantLock(); + notEmpty = takeLock.newCondition(); + putLock = new ReentrantLock(); + } + + /** + * Signals a waiting take. Called only from put/offer (which do not + * otherwise ordinarily lock takeLock.) + */ + private void signalNotEmpty() { + final ReentrantLock takeLock = this.takeLock; + takeLock.lock(); + try { + notEmpty.signal(); + } finally { + takeLock.unlock(); + } + } + + /** + * Locks to prevent both puts and takes. + */ + private void fullyLock() { + putLock.lock(); + takeLock.lock(); + } + + /** + * Unlocks to allow both puts and takes. + */ + private void fullyUnlock() { + takeLock.unlock(); + putLock.unlock(); + } + + /** + * Inserts the specified element at the tail of the queue, if it's not already present. + * + * @param e the element to add + */ + public void put(E e) { + if (e == null) { + throw new NullPointerException(); + } + + putLock.lock(); + try { + delegate.add(e); + } finally { + putLock.unlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Inserts the specified element at the head of the queue. + * + * @param e the element to add + */ + public void putFirst(E e) { + if (e == null) { + throw new NullPointerException(); + } + putAllFirst(Collections.singleton(e)); + } + + /** + * Inserts the specified elements at the tail of the queue. Any elements already present in + * the queue are ignored. + * + * @param c the elements to add + */ + public void putAll(Collection c) { + if (c == null) { + throw new NullPointerException(); + } + + putLock.lock(); + try { + delegate.addAll(c); + } finally { + putLock.unlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Inserts the specified elements at the head of the queue. + * + * @param c the elements to add + */ + public void putAllFirst(Collection c) { + if (c == null) { + throw new NullPointerException(); + } + + fullyLock(); + try { + final LinkedHashSet copy = new LinkedHashSet<>(c.size() + delegate.size()); + copy.addAll(c); + copy.addAll(delegate); + delegate = copy; + } finally { + fullyUnlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Retrieves and removes the head of this queue, waiting if necessary + * until an element becomes available. 
+ * + * @return the head of this queue + * @throws InterruptedException if interrupted while waiting + */ + public E take() throws InterruptedException { + E x; + takeLock.lockInterruptibly(); + try { + while (delegate.isEmpty()) { + notEmpty.await(); + } + final Iterator iter = delegate.iterator(); + x = iter.next(); + iter.remove(); + if (!delegate.isEmpty()) { + notEmpty.signal(); + } + } finally { + takeLock.unlock(); + } + return x; + } + + /** + * Atomically removes all of the elements from this queue. + * The queue will be empty after this call returns. + */ + public void clear() { + putLock.lock(); + try { + delegate.clear(); + } finally { + putLock.unlock(); + } + } + + /** + * Returns the number of elements in this queue. + * + * @return the number of elements in this queue + */ + public int size() { + takeLock.lock(); + try { + return delegate.size(); + } finally { + takeLock.unlock(); + } + } + + @Override + public String toString() { + takeLock.lock(); + try { + return delegate.toString(); + } finally { + takeLock.unlock(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java new file mode 100644 index 000000000000..30f9fc25364d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import java.io.IOException; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.RateLimiter; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; + +/** + * Consumes normalization request targets ({@link TableName}s) off the + * {@link RegionNormalizerWorkQueue}, dispatches them to the {@link RegionNormalizer}, + * and executes the resulting {@link NormalizationPlan}s. 
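+ * <p>
+ * Plan execution is throttled by a {@code RateLimiter} sized from
+ * {@code hbase.normalizer.throughput.max_bytes_per_sec}. As an illustrative figure: configured
+ * at 5,000,000 (5 MB/sec), a merge touching two 10 MB regions acquires 20 permits, stalling the
+ * worker roughly 4 seconds before the next plan is processed.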
+ */ +@InterfaceAudience.Private +class RegionNormalizerWorker implements Runnable { + private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerWorker.class); + + static final String RATE_LIMIT_BYTES_PER_SEC_KEY = + "hbase.normalizer.throughput.max_bytes_per_sec"; + private static final long RATE_UNLIMITED_BYTES = 1_000_000_000_000L; // 1TB/sec + + private final MasterServices masterServices; + private final RegionNormalizer regionNormalizer; + private final RegionNormalizerWorkQueue workQueue; + private final RateLimiter rateLimiter; + + private final long[] skippedCount; + private long splitPlanCount; + private long mergePlanCount; + + RegionNormalizerWorker( + final Configuration configuration, + final MasterServices masterServices, + final RegionNormalizer regionNormalizer, + final RegionNormalizerWorkQueue workQueue + ) { + this.masterServices = masterServices; + this.regionNormalizer = regionNormalizer; + this.workQueue = workQueue; + this.skippedCount = new long[NormalizationPlan.PlanType.values().length]; + this.splitPlanCount = 0; + this.mergePlanCount = 0; + this.rateLimiter = loadRateLimiter(configuration); + } + + private static RateLimiter loadRateLimiter(final Configuration configuration) { + long rateLimitBytes = + configuration.getLongBytes(RATE_LIMIT_BYTES_PER_SEC_KEY, RATE_UNLIMITED_BYTES); + long rateLimitMbs = rateLimitBytes / 1_000_000L; + if (rateLimitMbs <= 0) { + LOG.warn("Configured value {}={} is <= 1MB. Falling back to default.", + RATE_LIMIT_BYTES_PER_SEC_KEY, rateLimitBytes); + rateLimitBytes = RATE_UNLIMITED_BYTES; + rateLimitMbs = RATE_UNLIMITED_BYTES / 1_000_000L; + } + LOG.info("Normalizer rate limit set to {}", + rateLimitBytes == RATE_UNLIMITED_BYTES ? "unlimited" : rateLimitMbs + " MB/sec"); + return RateLimiter.create(rateLimitMbs); + } + + /** + * @see RegionNormalizerManager#planSkipped(NormalizationPlan.PlanType) + */ + void planSkipped(NormalizationPlan.PlanType type) { + synchronized (skippedCount) { + // updates come here via procedure threads, so synchronize access to this counter. + skippedCount[type.ordinal()]++; + } + } + + /** + * @see RegionNormalizerManager#getSkippedCount(NormalizationPlan.PlanType) + */ + long getSkippedCount(NormalizationPlan.PlanType type) { + return skippedCount[type.ordinal()]; + } + + /** + * @see RegionNormalizerManager#getSplitPlanCount() + */ + long getSplitPlanCount() { + return splitPlanCount; + } + + /** + * @see RegionNormalizerManager#getMergePlanCount() + */ + long getMergePlanCount() { + return mergePlanCount; + } + + @Override + public void run() { + while (true) { + if (Thread.interrupted()) { + LOG.debug("interrupt detected. terminating."); + break; + } + final TableName tableName; + try { + tableName = workQueue.take(); + } catch (InterruptedException e) { + LOG.debug("interrupt detected. 
terminating."); + break; + } + + final List plans = calculatePlans(tableName); + submitPlans(plans); + } + } + + private List calculatePlans(final TableName tableName) { + if (masterServices.skipRegionManagementAction("region normalizer")) { + return Collections.emptyList(); + } + + try { + final TableDescriptor tblDesc = masterServices.getTableDescriptors().get(tableName); + if (tblDesc != null && !tblDesc.isNormalizationEnabled()) { + LOG.debug("Skipping table {} because normalization is disabled in its table properties.", + tableName); + return Collections.emptyList(); + } + } catch (IOException e) { + LOG.debug("Skipping table {} because unable to access its table descriptor.", tableName, e); + return Collections.emptyList(); + } + + final List plans = regionNormalizer.computePlansForTable(tableName); + if (CollectionUtils.isEmpty(plans)) { + LOG.debug("No normalization required for table {}.", tableName); + return Collections.emptyList(); + } + return plans; + } + + private void submitPlans(final List plans) { + // as of this writing, `plan.submit()` is non-blocking and uses Async Admin APIs to submit + // task, so there's no artificial rate-limiting of merge/split requests due to this serial loop. + for (NormalizationPlan plan : plans) { + switch (plan.getType()) { + case MERGE: { + submitMergePlan((MergeNormalizationPlan) plan); + break; + } + case SPLIT: { + submitSplitPlan((SplitNormalizationPlan) plan); + break; + } + case NONE: + LOG.debug("Nothing to do for {} with PlanType=NONE. Ignoring.", plan); + planSkipped(plan.getType()); + break; + default: + LOG.warn("Plan {} is of an unrecognized PlanType. Ignoring.", plan); + planSkipped(plan.getType()); + break; + } + } + } + + /** + * Interacts with {@link MasterServices} in order to execute a plan. + */ + private void submitMergePlan(final MergeNormalizationPlan plan) { + final int totalSizeMb; + try { + final long totalSizeMbLong = plan.getNormalizationTargets() + .stream() + .mapToLong(NormalizationTarget::getRegionSizeMb) + .reduce(0, Math::addExact); + totalSizeMb = Math.toIntExact(totalSizeMbLong); + } catch (ArithmeticException e) { + LOG.debug("Sum of merge request size overflows rate limiter data type. {}", plan); + planSkipped(plan.getType()); + return; + } + + final RegionInfo[] infos = plan.getNormalizationTargets() + .stream() + .map(NormalizationTarget::getRegionInfo) + .toArray(RegionInfo[]::new); + final long pid; + try { + pid = masterServices.mergeRegions( + infos, false, HConstants.NO_NONCE, HConstants.NO_NONCE); + } catch (IOException e) { + LOG.info("failed to submit plan {}.", plan, e); + planSkipped(plan.getType()); + return; + } + mergePlanCount++; + LOG.info("Submitted {} resulting in pid {}", plan, pid); + final long rateLimitedSecs = Math.round(rateLimiter.acquire(Math.max(1, totalSizeMb))); + LOG.debug("Rate limiting delayed the worker by {}", Duration.ofSeconds(rateLimitedSecs)); + } + + /** + * Interacts with {@link MasterServices} in order to execute a plan. + */ + private void submitSplitPlan(final SplitNormalizationPlan plan) { + final int totalSizeMb; + try { + totalSizeMb = Math.toIntExact(plan.getSplitTarget().getRegionSizeMb()); + } catch (ArithmeticException e) { + LOG.debug("Split request size overflows rate limiter data type. 
{}", plan); + planSkipped(plan.getType()); + return; + } + final RegionInfo info = plan.getSplitTarget().getRegionInfo(); + final long rateLimitedSecs = Math.round(rateLimiter.acquire(Math.max(1, totalSizeMb))); + LOG.debug("Rate limiting delayed this operation by {}", Duration.ofSeconds(rateLimitedSecs)); + + final long pid; + try { + pid = masterServices.splitRegion( + info, null, HConstants.NO_NONCE, HConstants.NO_NONCE); + } catch (IOException e) { + LOG.info("failed to submit plan {}.", plan, e); + planSkipped(plan.getType()); + return; + } + splitPlanCount++; + LOG.info("Submitted {} resulting in pid {}", plan, pid); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index a904e17f7b0f..a641a0aa25b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.assignment.RegionStates; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -54,29 +53,9 @@ *
 * <li>Otherwise, for the next region in the chain R1, if R0 + R1 is smaller than S, R0 and R1
 * are kindly requested to merge.</li>
 * </ul>
- * <p>
- * The following parameters are configurable:
- * <ol>
- *   <li>Whether to split a region as part of normalization. Configuration:
- *     {@value #SPLIT_ENABLED_KEY}, default: {@value #DEFAULT_SPLIT_ENABLED}.</li>
- *   <li>Whether to merge a region as part of normalization. Configuration:
- *     {@value #MERGE_ENABLED_KEY}, default: {@value #DEFAULT_MERGE_ENABLED}.</li>
- *   <li>The minimum number of regions in a table to consider it for merge normalization.
- *     Configuration: {@value #MIN_REGION_COUNT_KEY}, default:
- *     {@value #DEFAULT_MIN_REGION_COUNT}.</li>
- *   <li>The minimum age for a region to be considered for a merge, in days. Configuration:
- *     {@value #MERGE_MIN_REGION_AGE_DAYS_KEY}, default:
- *     {@value #DEFAULT_MERGE_MIN_REGION_AGE_DAYS}.</li>
- *   <li>The minimum size for a region to be considered for a merge, in whole MBs. Configuration:
- *     {@value #MERGE_MIN_REGION_SIZE_MB_KEY}, default:
- *     {@value #DEFAULT_MERGE_MIN_REGION_SIZE_MB}.</li>
- * </ol>
- * <p>
    - * To see detailed logging of the application of these configuration values, set the log level for - * this class to `TRACE`. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class SimpleRegionNormalizer implements RegionNormalizer { +class SimpleRegionNormalizer implements RegionNormalizer { private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class); static final String SPLIT_ENABLED_KEY = "hbase.normalizer.split.enabled"; @@ -92,7 +71,6 @@ public class SimpleRegionNormalizer implements RegionNormalizer { static final String MERGE_MIN_REGION_SIZE_MB_KEY = "hbase.normalizer.merge.min_region_size.mb"; static final int DEFAULT_MERGE_MIN_REGION_SIZE_MB = 1; - private final long[] skippedCount; private Configuration conf; private MasterServices masterServices; private boolean splitEnabled; @@ -102,7 +80,6 @@ public class SimpleRegionNormalizer implements RegionNormalizer { private int mergeMinRegionSizeMb; public SimpleRegionNormalizer() { - skippedCount = new long[NormalizationPlan.PlanType.values().length]; splitEnabled = DEFAULT_SPLIT_ENABLED; mergeEnabled = DEFAULT_MERGE_ENABLED; minRegionCount = DEFAULT_MIN_REGION_COUNT; @@ -203,16 +180,6 @@ public void setMasterServices(final MasterServices masterServices) { this.masterServices = masterServices; } - @Override - public void planSkipped(final RegionInfo hri, final PlanType type) { - skippedCount[type.ordinal()]++; - } - - @Override - public long getSkippedCount(NormalizationPlan.PlanType type) { - return skippedCount[type.ordinal()]; - } - @Override public List computePlansForTable(final TableName table) { if (table == null) { @@ -371,7 +338,11 @@ private List computeMergeNormalizationPlans(final NormalizeCo final long nextSizeMb = getRegionSizeMB(next); // always merge away empty regions when they present themselves. 
if (currentSizeMb == 0 || nextSizeMb == 0 || currentSizeMb + nextSizeMb < avgRegionSizeMb) { - plans.add(new MergeNormalizationPlan(current, next)); + final MergeNormalizationPlan plan = new MergeNormalizationPlan.Builder() + .addTarget(current, currentSizeMb) + .addTarget(next, nextSizeMb) + .build(); + plans.add(plan); candidateIdx++; } } @@ -408,11 +379,11 @@ private List computeSplitNormalizationPlans(final NormalizeCo if (skipForSplit(ctx.getRegionStates().getRegionState(hri), hri)) { continue; } - final long regionSize = getRegionSizeMB(hri); - if (regionSize > 2 * avgRegionSize) { + final long regionSizeMb = getRegionSizeMB(hri); + if (regionSizeMb > 2 * avgRegionSize) { LOG.info("Table {}, large region {} has size {}, more than twice avg size {}, splitting", - ctx.getTableName(), hri.getRegionNameAsString(), regionSize, avgRegionSize); - plans.add(new SplitNormalizationPlan(hri)); + ctx.getTableName(), hri.getRegionNameAsString(), regionSizeMb, avgRegionSize); + plans.add(new SplitNormalizationPlan(hri, regionSizeMb)); } } return plans; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java index 7c634fbf2488..ffe68cc9f62d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java @@ -18,32 +18,23 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import java.io.IOException; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; /** - * Normalization plan to split region. + * Normalization plan to split a region. 
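+ * <p>
+ * For example (hypothetical variables; the constructor is package-private):
+ * {@code new SplitNormalizationPlan(regionInfo, regionSizeMb)}.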
*/ @InterfaceAudience.Private -public class SplitNormalizationPlan implements NormalizationPlan { +final class SplitNormalizationPlan implements NormalizationPlan { - private final RegionInfo regionInfo; + private final NormalizationTarget splitTarget; - public SplitNormalizationPlan(RegionInfo regionInfo) { - this.regionInfo = regionInfo; - } - - @Override - public long submit(MasterServices masterServices) throws IOException { - return masterServices.splitRegion(regionInfo, null, HConstants.NO_NONCE, - HConstants.NO_NONCE); + SplitNormalizationPlan(final RegionInfo splitTarget, final long splitTargetSizeMb) { + this.splitTarget = new NormalizationTarget(splitTarget, splitTargetSizeMb); } @Override @@ -51,14 +42,14 @@ public PlanType getType() { return PlanType.SPLIT; } - public RegionInfo getRegionInfo() { - return regionInfo; + public NormalizationTarget getSplitTarget() { + return splitTarget; } @Override public String toString() { return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("regionInfo", regionInfo) + .append("splitTarget", splitTarget) .toString(); } @@ -75,13 +66,13 @@ public boolean equals(Object o) { SplitNormalizationPlan that = (SplitNormalizationPlan) o; return new EqualsBuilder() - .append(regionInfo, that.regionInfo) + .append(splitTarget, that.splitTarget) .isEquals(); } @Override public int hashCode() { return new HashCodeBuilder(17, 37) - .append(regionInfo) + .append(splitTarget) .toHashCode(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java new file mode 100644 index 000000000000..e3180347dc34 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * The Region Normalizer subsystem is responsible for coaxing all the regions in a table toward + * a "normal" size, according to their storefile size. It does this by splitting regions that + * are significantly larger than the norm, and merging regions that are significantly smaller than + * the norm. + *

+ * <p>
+ * The public interface to the Region Normalizer subsystem is limited to the following classes:
+ * <ul>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory} provides an
+ *     entry point for creating an instance of the
+ *     {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager}.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager} encapsulates
+ *     the whole Region Normalizer subsystem. You'll find one of these hanging off of the
+ *     {@link org.apache.hadoop.hbase.master.HMaster}, which uses it to delegate API calls. There
+ *     is usually only a single instance of this class.
+ *   </li>
+ *   <li>
+ *     Various configuration points that share the common prefix of {@code hbase.normalizer}.
+ *     <ul>
+ *       <li>Whether to split a region as part of normalization. Configuration:
+ *         {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#SPLIT_ENABLED_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_SPLIT_ENABLED}.</li>
+ *       <li>Whether to merge a region as part of normalization. Configuration:
+ *         {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_ENABLED_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_ENABLED}.</li>
+ *       <li>The minimum number of regions in a table to consider it for merge normalization.
+ *         Configuration: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MIN_REGION_COUNT_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MIN_REGION_COUNT}.</li>
+ *       <li>The minimum age for a region to be considered for a merge, in days. Configuration:
+ *         {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_MIN_REGION_AGE_DAYS_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_MIN_REGION_AGE_DAYS}.</li>
+ *       <li>The minimum size for a region to be considered for a merge, in whole MBs.
+ *         Configuration: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_MIN_REGION_SIZE_MB_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_MIN_REGION_SIZE_MB}.</li>
+ *       <li>The limit on total throughput of the Region Normalizer's actions, in whole MBs.
+ *         Configuration: {@value org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker#RATE_LIMIT_BYTES_PER_SEC_KEY},
+ *         default: unlimited.</li>
+ *     </ul>
+ *     <p>
+ *     To see detailed logging of the application of these configuration values, set the log
+ *     level for this package to `TRACE`.
+ *   </li>
+ * </ul>
+ * <p>
+ * The Region Normalizer subsystem is composed of a handful of related classes:
+ * <ul>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker} provides a system by
+ *     which the Normalizer can be disabled at runtime. It currently does this by managing a
+ *     znode, but this is an implementation detail.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue} is a
+ *     {@link java.util.Set}-like {@link java.util.Queue} that permits a single copy of a given
+ *     work item to exist in the queue at one time. It also provides a facility for a producer to
+ *     add an item to the front of the line. Consumers are blocked waiting for new work.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore} wakes up
+ *     periodically and schedules new normalization work, adding targets to the queue.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker} runs in a
+ *     daemon thread, grabbing work off the queue as it becomes available.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer} implements
+ *     the logic for calculating target region sizes and emitting a list of corresponding
+ *     {@link org.apache.hadoop.hbase.master.normalizer.NormalizationPlan} objects.
+ *   </li>
+ * </ul>
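+ * <p>
+ * As a rough, illustrative sketch of the work queue contract described above (hypothetical
+ * variables; not a verbatim excerpt from the implementation):
+ * <pre>{@code
+ * RegionNormalizerWorkQueue<TableName> queue = new RegionNormalizerWorkQueue<>();
+ * queue.put(tableA);             // enqueued
+ * queue.put(tableA);             // duplicate of an existing item; queue size is unchanged
+ * queue.putFirst(tableB);        // jumps to the front of the line
+ * TableName next = queue.take(); // blocks until an item is available; returns tableB
+ * }</pre>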
    + */ +package org.apache.hadoop.hbase.master.normalizer; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 7c65005de55d..3f3e80960bb9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.master; import static org.mockito.Mockito.mock; - import java.io.IOException; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; +import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; @@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; - import org.apache.hbase.thirdparty.com.google.protobuf.Service; public class MockNoopMasterServices implements MasterServices { @@ -109,11 +108,6 @@ public ChoreService getChoreService() { return null; } - @Override - public RegionNormalizer getRegionNormalizer() { - return null; - } - @Override public CatalogJanitor getCatalogJanitor() { return null; @@ -139,6 +133,10 @@ public MasterQuotaManager getMasterQuotaManager() { return null; } + @Override public RegionNormalizerManager getRegionNormalizerManager() { + return null; + } + @Override public ProcedureExecutor getMasterProcedureExecutor() { return null; @@ -341,6 +339,10 @@ public boolean isInMaintenanceMode() { return false; } + @Override public boolean skipRegionManagementAction(String action) { + return false; + } + @Override public long getLastMajorCompactionTimestamp(TableName table) throws IOException { return 0; @@ -507,4 +509,9 @@ public RSGroupInfoManager getRSGroupInfoManager() { public boolean isBalancerOn() { return false; } + + @Override + public boolean normalizeRegions(NormalizeTableFilterParams ntfp, boolean isHighPriority) { + return false; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java index 5aec49bdb11c..87a7e680ff8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.master; import java.lang.reflect.Field; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.ScheduledChore; @@ -30,7 +29,6 @@ import org.apache.hadoop.hbase.master.cleaner.LogCleaner; import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.AfterClass; @@ -66,7 +64,7 @@ public static void tearDown() throws Exception { } @Test - public void testDefaultScheduledChores() throws Exception { + public void testDefaultScheduledChores() { // test if logCleaner chore is scheduled by default in HMaster init TestChoreField logCleanerTestChoreField = new TestChoreField<>(); LogCleaner logCleaner = logCleanerTestChoreField.getChoreObj("logCleaner"); @@ -96,10 +94,10 @@ public void testDefaultScheduledChores() throws Exception { balancerChoreTestChoreField.testIfChoreScheduled(balancerChore); // test if normalizerChore chore is scheduled by default in HMaster init - TestChoreField regionNormalizerChoreTestChoreField = + ScheduledChore regionNormalizerChore = hMaster.getRegionNormalizerManager() + .getRegionNormalizerChore(); + TestChoreField regionNormalizerChoreTestChoreField = new TestChoreField<>(); - RegionNormalizerChore regionNormalizerChore = regionNormalizerChoreTestChoreField - .getChoreObj("normalizerChore"); regionNormalizerChoreTestChoreField.testIfChoreScheduled(regionNormalizerChore); // test if catalogJanitorChore chore is scheduled by default in HMaster init @@ -114,22 +112,27 @@ public void testDefaultScheduledChores() throws Exception { hbckChoreTestChoreField.testIfChoreScheduled(hbckChore); } - + /** + * Reflect into the {@link HMaster} instance and find by field name a specified instance + * of {@link ScheduledChore}. 
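+   * <p>
+   * For example (an illustrative call):
+   * {@code new TestChoreField<LogCleaner>().getChoreObj("logCleaner")}.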
+ */ private static class TestChoreField { - private E getChoreObj(String fieldName) throws NoSuchFieldException, - IllegalAccessException { - Field masterField = HMaster.class.getDeclaredField(fieldName); - masterField.setAccessible(true); - E choreFieldVal = (E) masterField.get(hMaster); - return choreFieldVal; + @SuppressWarnings("unchecked") + private E getChoreObj(String fieldName) { + try { + Field masterField = HMaster.class.getDeclaredField(fieldName); + masterField.setAccessible(true); + return (E) masterField.get(hMaster); + } catch (Exception e) { + throw new AssertionError( + "Unable to retrieve field '" + fieldName + "' from HMaster instance.", e); + } } private void testIfChoreScheduled(E choreObj) { Assert.assertNotNull(choreObj); Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(choreObj)); } - } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java index ff88be1ef20d..6ac68b300483 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java @@ -72,8 +72,10 @@ public static void teardown() throws Exception { public void testInfo() { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master); - assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount(), 0); - assertEquals(master.getMergePlanCount(), info.getMergePlanCount(), 0); + assertEquals( + master.getRegionNormalizerManager().getSplitPlanCount(), info.getSplitPlanCount(), 0); + assertEquals( + master.getRegionNormalizerManager().getMergePlanCount(), info.getMergePlanCount(), 0); assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0); assertEquals(master.getClusterId(), info.getClusterId()); assertEquals(master.getMasterActiveTime(), info.getActiveTime()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java new file mode 100644 index 000000000000..7e6c74910edf --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java @@ -0,0 +1,234 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.normalizer; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +/** + * Tests that {@link RegionNormalizerWorkQueue} implements the contract described in its docstring. + */ +@Category({ MasterTests.class, SmallTests.class}) +public class TestRegionNormalizerWorkQueue { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionNormalizerWorkQueue.class); + + @Rule + public TestName testName = new TestName(); + + @Test + public void testElementUniquenessAndFIFO() throws Exception { + final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); + final List content = new LinkedList<>(); + IntStream.of(4, 3, 2, 1, 4, 3, 2, 1) + .boxed() + .forEach(queue::put); + assertEquals(4, queue.size()); + while (queue.size() > 0) { + content.add(queue.take()); + } + assertThat(content, contains(4, 3, 2, 1)); + + queue.clear(); + queue.putAll(Arrays.asList(4, 3, 2, 1)); + queue.putAll(Arrays.asList(4, 5)); + assertEquals(5, queue.size()); + content.clear(); + while (queue.size() > 0) { + content.add(queue.take()); + } + assertThat(content, contains(4, 3, 2, 1, 5)); + } + + @Test + public void testPriorityAndFIFO() throws Exception { + final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); + final List content = new LinkedList<>(); + queue.putAll(Arrays.asList(4, 3, 2, 1)); + assertEquals(4, queue.size()); + queue.putFirst(0); + assertEquals(5, queue.size()); + drainTo(queue, content); + assertThat("putFirst items should jump the queue, preserving existing order", + content, contains(0, 4, 3, 2, 1)); + + queue.clear(); + content.clear(); + queue.putAll(Arrays.asList(4, 3, 2, 1)); + queue.putFirst(1); + assertEquals(4, queue.size()); + drainTo(queue, content); + assertThat("existing items re-added with putFirst should jump the queue", + content, contains(1, 4, 3, 2)); + + queue.clear(); + content.clear(); + queue.putAll(Arrays.asList(4, 3, 2, 1)); + queue.putAllFirst(Arrays.asList(2, 3)); + assertEquals(4, queue.size()); + drainTo(queue, content); + assertThat( + "existing items re-added with putAllFirst jump the queue AND honor changes in priority", + content, contains(2, 3, 4, 1)); + } + + private enum Action { + PUT, + PUT_FIRST, + PUT_ALL, + PUT_ALL_FIRST, + } + + /** + * Test that the uniqueness constraint is honored in the face of 
concurrent modification. + */ + @Test + public void testConcurrentPut() throws Exception { + final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); + final int maxValue = 100; + final Runnable producer = () -> { + final Random rand = ThreadLocalRandom.current(); + for (int i = 0; i < 1_000; i++) { + final Action action = Action.values()[rand.nextInt(Action.values().length)]; + switch (action) { + case PUT: { + final int val = rand.nextInt(maxValue); + queue.put(val); + break; + } + case PUT_FIRST: { + final int val = rand.nextInt(maxValue); + queue.putFirst(val); + break; + } + case PUT_ALL: { + final List vals = rand.ints(5, 0, maxValue) + .boxed() + .collect(Collectors.toList()); + queue.putAll(vals); + break; + } + case PUT_ALL_FIRST: { + final List vals = rand.ints(5, 0, maxValue) + .boxed() + .collect(Collectors.toList()); + queue.putAllFirst(vals); + break; + } + default: + fail("Unrecognized action " + action); + } + } + }; + + final int numThreads = 5; + final CompletableFuture[] futures = IntStream.range(0, numThreads) + .mapToObj(val -> CompletableFuture.runAsync(producer)) + .toArray(CompletableFuture[]::new); + CompletableFuture.allOf(futures).join(); + + final List content = new ArrayList<>(queue.size()); + drainTo(queue, content); + assertThat("at most `maxValue` items should be present.", + content.size(), lessThanOrEqualTo(maxValue)); + assertEquals("all items should be unique.", content.size(), new HashSet<>(content).size()); + } + + /** + * Test that calls to {@link RegionNormalizerWorkQueue#take()} block the requesting thread. The + * producing thread places new entries onto the queue following a known schedule. The consuming + * thread collects a time measurement between calls to {@code take}. Finally, the test makes + * coarse-grained assertions of the consumer's observations based on the producer's schedule. + */ + @Test + public void testTake() throws Exception { + final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); + final ConcurrentLinkedQueue takeTimes = new ConcurrentLinkedQueue<>(); + final AtomicBoolean finished = new AtomicBoolean(false); + final Runnable consumer = () -> { + try { + while (!finished.get()) { + queue.take(); + takeTimes.add(System.nanoTime()); + } + } catch (InterruptedException e) { + fail("interrupted."); + } + }; + + CompletableFuture worker = CompletableFuture.runAsync(consumer); + final long testStart = System.nanoTime(); + for (int i = 0; i < 5; i++) { + Thread.sleep(10); + queue.put(i); + } + + // set finished = true and pipe one more value in case the thread needs an extra pass through + // the loop. 
+ finished.set(true); + queue.put(1); + worker.get(1, TimeUnit.SECONDS); + + final Iterator times = takeTimes.iterator(); + assertTrue("should have timing information for at least 2 calls to take.", + takeTimes.size() >= 5); + for (int i = 0; i < 5; i++) { + assertThat( + "Observations collected in takeTimes should increase by roughly 10ms every interval", + times.next(), greaterThan(testStart + TimeUnit.MILLISECONDS.toNanos(i * 10))); + } + } + + private static void drainTo(final RegionNormalizerWorkQueue queue, Collection dest) + throws InterruptedException { + assertThat(queue.size(), greaterThan(0)); + while (queue.size() > 0) { + dest.add(queue.take()); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java new file mode 100644 index 000000000000..e3a29b854060 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.normalizer; + +import static java.util.Collections.singletonList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.when; +import java.time.Duration; +import java.util.Arrays; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.StringDescription; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.mockito.Answers; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * A test over {@link RegionNormalizerWorker}. Being a background thread, the only points of + * interaction we have to this class are its input source ({@link RegionNormalizerWorkQueue} and + * its callbacks invoked against {@link RegionNormalizer} and {@link MasterServices}. The work + * queue is simple enough to use directly; for {@link MasterServices}, use a mock because, as of + * now, the worker only invokes 4 methods. 
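+ * <p>
+ * For example, the mock is primed like so (illustrative, mirroring {@code before()} below):
+ * {@code when(masterServices.skipRegionManagementAction(any())).thenReturn(false);}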
+ */ +@Category({ MasterTests.class, SmallTests.class}) +public class TestRegionNormalizerWorker { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionNormalizerWorker.class); + + @Rule + public TestName testName = new TestName(); + @Rule + public TableNameTestRule tableName = new TableNameTestRule(); + + @Rule + public MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private MasterServices masterServices; + @Mock + private RegionNormalizer regionNormalizer; + + private HBaseCommonTestingUtility testingUtility; + private RegionNormalizerWorkQueue queue; + private ExecutorService workerPool; + + private final AtomicReference workerThreadThrowable = new AtomicReference<>(); + + @Before + public void before() throws Exception { + MockitoAnnotations.initMocks(this); + when(masterServices.skipRegionManagementAction(any())).thenReturn(false); + testingUtility = new HBaseCommonTestingUtility(); + queue = new RegionNormalizerWorkQueue<>(); + workerThreadThrowable.set(null); + + final String threadNameFmt = + TestRegionNormalizerWorker.class.getSimpleName() + "-" + testName.getMethodName() + "-%d"; + final ThreadFactory threadFactory = new ThreadFactoryBuilder() + .setNameFormat(threadNameFmt) + .setDaemon(true) + .setUncaughtExceptionHandler((t, e) -> workerThreadThrowable.set(e)) + .build(); + workerPool = Executors.newSingleThreadExecutor(threadFactory); + } + + @After + public void after() throws Exception { + workerPool.shutdownNow(); // shutdownNow to interrupt the worker thread sitting on `take()` + assertTrue("timeout waiting for worker thread to terminate", + workerPool.awaitTermination(30, TimeUnit.SECONDS)); + final Throwable workerThrowable = workerThreadThrowable.get(); + assertThat("worker thread threw unexpected exception", workerThrowable, nullValue()); + } + + @Test + public void testMergeCounter() throws Exception { + final TableName tn = tableName.getTableName(); + final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) + .setNormalizationEnabled(true) + .build(); + when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); + when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())) + .thenReturn(1L); + when(regionNormalizer.computePlansForTable(tn)) + .thenReturn(singletonList(new MergeNormalizationPlan.Builder() + .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 10) + .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 20) + .build())); + + final RegionNormalizerWorker worker = new RegionNormalizerWorker( + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + final long beforeMergePlanCount = worker.getMergePlanCount(); + workerPool.submit(worker); + queue.put(tn); + + assertThatEventually("executing work should see plan count increase", + worker::getMergePlanCount, greaterThan(beforeMergePlanCount)); + } + + @Test + public void testSplitCounter() throws Exception { + final TableName tn = tableName.getTableName(); + final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) + .setNormalizationEnabled(true) + .build(); + when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); + when(masterServices.splitRegion(any(), any(), anyLong(), anyLong())) + .thenReturn(1L); + when(regionNormalizer.computePlansForTable(tn)) + .thenReturn(singletonList( + new SplitNormalizationPlan(RegionInfoBuilder.newBuilder(tn).build(), 10))); + + final RegionNormalizerWorker 
worker = new RegionNormalizerWorker( + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + final long beforeSplitPlanCount = worker.getSplitPlanCount(); + workerPool.submit(worker); + queue.put(tn); + + assertThatEventually("executing work should see plan count increase", + worker::getSplitPlanCount, greaterThan(beforeSplitPlanCount)); + } + + /** + * Assert that a rate limit is honored, at least in a rough way. Maintainers should manually + * inspect the log messages emitted by the worker thread to confirm that expected behavior. + */ + @Test + public void testRateLimit() throws Exception { + final TableName tn = tableName.getTableName(); + final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) + .setNormalizationEnabled(true) + .build(); + final RegionInfo splitRegionInfo = RegionInfoBuilder.newBuilder(tn).build(); + final RegionInfo mergeRegionInfo1 = RegionInfoBuilder.newBuilder(tn).build(); + final RegionInfo mergeRegionInfo2 = RegionInfoBuilder.newBuilder(tn).build(); + when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); + when(masterServices.splitRegion(any(), any(), anyLong(), anyLong())) + .thenReturn(1L); + when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())) + .thenReturn(1L); + when(regionNormalizer.computePlansForTable(tn)) + .thenReturn(Arrays.asList( + new SplitNormalizationPlan(splitRegionInfo, 2), + new MergeNormalizationPlan.Builder() + .addTarget(mergeRegionInfo1, 1) + .addTarget(mergeRegionInfo2, 2) + .build(), + new SplitNormalizationPlan(splitRegionInfo, 1))); + + final Configuration conf = testingUtility.getConfiguration(); + conf.set("hbase.normalizer.throughput.max_bytes_per_sec", "1m"); + final RegionNormalizerWorker worker = new RegionNormalizerWorker( + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + workerPool.submit(worker); + final long startTime = System.nanoTime(); + queue.put(tn); + + assertThatEventually("executing work should see split plan count increase", + worker::getSplitPlanCount, comparesEqualTo(2L)); + assertThatEventually("executing work should see merge plan count increase", + worker::getMergePlanCount, comparesEqualTo(1L)); + + final long endTime = System.nanoTime(); + assertThat("rate limited normalizer should have taken at least 5 seconds", + Duration.ofNanos(endTime - startTime), greaterThanOrEqualTo(Duration.ofSeconds(5))); + } + + /** + * Repeatedly evaluates {@code matcher} against the result of calling {@code actualSupplier} + * until the matcher succeeds or the timeout period of 30 seconds is exhausted. 
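+   * <p>
+   * Example usage, as in the tests above:
+   * <pre>{@code
+   * assertThatEventually("executing work should see plan count increase",
+   *   worker::getMergePlanCount, greaterThan(beforeMergePlanCount));
+   * }</pre>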
+ */ + private void assertThatEventually( + final String reason, + final Supplier actualSupplier, + final Matcher matcher + ) throws Exception { + testingUtility.waitFor(TimeUnit.SECONDS.toMillis(30), + new Waiter.ExplainingPredicate() { + private T lastValue = null; + + @Override + public String explainFailure() { + final Description description = new StringDescription() + .appendText(reason) + .appendText("\nExpected: ") + .appendDescriptionOf(matcher) + .appendText("\n but: "); + matcher.describeMismatch(lastValue, description); + return description.toString(); + } + + @Override public boolean evaluate() { + lastValue = actualSupplier.get(); + return matcher.matches(lastValue); + } + }); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index 89da907eeb09..f263cbc4fdfd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -175,8 +175,12 @@ public void testMergeOfSmallRegions() { createRegionSizesMap(regionInfos, 15, 5, 5, 15, 16); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(1), regionInfos.get(2)))); + assertThat( + normalizer.computePlansForTable(tableName), + contains(new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(1), 5) + .addTarget(regionInfos.get(2), 5) + .build())); } // Test for situation illustrated in HBASE-14867 @@ -188,9 +192,12 @@ public void testMergeOfSecondSmallestRegions() { createRegionSizesMap(regionInfos, 1, 10000, 10000, 10000, 2700, 2700); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(4), regionInfos.get(5)) - )); + assertThat( + normalizer.computePlansForTable(tableName), + contains(new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(4), 2700) + .addTarget(regionInfos.get(5), 2700) + .build())); } @Test @@ -214,7 +221,7 @@ public void testSplitOfLargeRegion() { setupMocksForNormalizer(regionSizes, regionInfos); assertThat(normalizer.computePlansForTable(tableName), contains( - new SplitNormalizationPlan(regionInfos.get(3)))); + new SplitNormalizationPlan(regionInfos.get(3), 30))); } @Test @@ -229,18 +236,26 @@ public void testSplitWithTargetRegionSize() throws Exception { when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionSize()) .thenReturn(20L); assertThat(normalizer.computePlansForTable(tableName), contains( - new SplitNormalizationPlan(regionInfos.get(2)), - new SplitNormalizationPlan(regionInfos.get(3)), - new SplitNormalizationPlan(regionInfos.get(4)), - new SplitNormalizationPlan(regionInfos.get(5)) + new SplitNormalizationPlan(regionInfos.get(2), 60), + new SplitNormalizationPlan(regionInfos.get(3), 80), + new SplitNormalizationPlan(regionInfos.get(4), 100), + new SplitNormalizationPlan(regionInfos.get(5), 120) )); // test when target region size is 200 when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionSize()) .thenReturn(200L); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)), - new 
MergeNormalizationPlan(regionInfos.get(2), regionInfos.get(3)))); + assertThat( + normalizer.computePlansForTable(tableName), + contains( + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 20) + .addTarget(regionInfos.get(1), 40) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(2), 60) + .addTarget(regionInfos.get(3), 80) + .build())); } @Test @@ -255,14 +270,18 @@ public void testSplitWithTargetRegionCount() throws Exception { when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionCount()) .thenReturn(8); assertThat(normalizer.computePlansForTable(tableName), contains( - new SplitNormalizationPlan(regionInfos.get(2)), - new SplitNormalizationPlan(regionInfos.get(3)))); + new SplitNormalizationPlan(regionInfos.get(2), 60), + new SplitNormalizationPlan(regionInfos.get(3), 80))); // test when target region count is 3 when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionCount()) .thenReturn(3); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)))); + assertThat( + normalizer.computePlansForTable(tableName), + contains(new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 20) + .addTarget(regionInfos.get(1), 40) + .build())); } @Test @@ -312,14 +331,17 @@ public void testHonorsMinimumRegionCount() { List plans = normalizer.computePlansForTable(tableName); assertThat(plans, contains( - new SplitNormalizationPlan(regionInfos.get(2)), - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)))); + new SplitNormalizationPlan(regionInfos.get(2), 10), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 1) + .addTarget(regionInfos.get(1), 1) + .build())); // have to call setupMocks again because we don't have dynamic config update on normalizer. 
conf.setInt(MIN_REGION_COUNT_KEY, 4); setupMocksForNormalizer(regionSizes, regionInfos); assertThat(normalizer.computePlansForTable(tableName), contains( - new SplitNormalizationPlan(regionInfos.get(2)))); + new SplitNormalizationPlan(regionInfos.get(2), 10))); } @Test @@ -356,8 +378,12 @@ public void testHonorsMergeMinRegionSize() { assertFalse(normalizer.isSplitEnabled()); assertEquals(1, normalizer.getMergeMinRegionSizeMb()); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)))); + assertThat( + normalizer.computePlansForTable(tableName), + contains(new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 1) + .addTarget(regionInfos.get(1), 2) + .build())); conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 3); setupMocksForNormalizer(regionSizes, regionInfos); @@ -378,9 +404,18 @@ public void testMergeEmptyRegions() { assertFalse(normalizer.isSplitEnabled()); assertEquals(0, normalizer.getMergeMinRegionSizeMb()); assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)), - new MergeNormalizationPlan(regionInfos.get(2), regionInfos.get(3)), - new MergeNormalizationPlan(regionInfos.get(5), regionInfos.get(6)))); + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 1) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(2), 10) + .addTarget(regionInfos.get(3), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(5), 10) + .addTarget(regionInfos.get(6), 0) + .build())); } // This test is to make sure that normalizer is only going to merge adjacent regions. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java index 173adf49db26..f5feb59ca329 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; - import java.io.IOException; import java.util.Collections; import java.util.Comparator; @@ -161,6 +160,7 @@ public void testHonorsNormalizerTableSetting() throws Exception { tn2 + " should not have split.", tn2RegionCount, getRegionCount(tn2)); + LOG.debug("waiting for t3 to settle..."); waitForTableRegionCount(tn3, tn3RegionCount); } finally { dropIfExists(tn1); @@ -187,7 +187,7 @@ void testRegionNormalizationSplit(boolean limitedByQuota) throws Exception { : TableName.valueOf(name.getMethodName()); final int currentRegionCount = createTableBegsSplit(tableName, true, false); - final long existingSkippedSplitCount = master.getRegionNormalizer() + final long existingSkippedSplitCount = master.getRegionNormalizerManager() .getSkippedCount(PlanType.SPLIT); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); @@ -332,7 +332,8 @@ private static void waitForSkippedSplits(final HMaster master, return "waiting to observe split attempt and skipped."; } @Override public boolean evaluate() { - final long skippedSplitCount = master.getRegionNormalizer().getSkippedCount(PlanType.SPLIT); + final long 
skippedSplitCount = master.getRegionNormalizerManager() + .getSkippedCount(PlanType.SPLIT); return skippedSplitCount > existingSkippedSplitCount; } }); From 665a8767a06b07fdca2e42b5ed0a0b132d8b3d12 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Fri, 9 Oct 2020 08:46:05 -0700 Subject: [PATCH 119/769] HBASE-22976 [HBCK2] Add RecoveredEditsPlayer (#2504) Make it so WALPlayer can replay recovered.edits files. hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java Allow for WAL files that do NOT have a startime in their name. Use the 'generic' WAL-filename parser instead of the one that used be local here. Implement support for 'startTime' filter. Previous was just not implemented. hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java Checkstyle. hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java Use the new general WAL name timestamp parser. hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java Utility for parsing timestamp from WAL filename. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java Export attributes about the local recovered.edits test file so other tests can play with it. Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/util/CommonFSUtils.java | 3 +- .../hbase/mapreduce/WALInputFormat.java | 82 +++++++++++-------- .../hadoop/hbase/mapreduce/WALPlayer.java | 18 ++-- .../hbase/mapreduce/TestWALInputFormat.java | 75 +++++++++++++++++ .../hadoop/hbase/mapreduce/TestWALPlayer.java | 33 ++++++-- .../hbase/mapreduce/TestWALRecordReader.java | 35 ++++---- .../hbase/wal/AbstractFSWALProvider.java | 12 +-- .../java/org/apache/hadoop/hbase/wal/WAL.java | 29 +++++++ .../regionserver/TestRecoveredEdits.java | 63 ++++++++------ src/main/asciidoc/_chapters/ops_mgt.adoc | 36 ++------ 10 files changed, 263 insertions(+), 123 deletions(-) create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index aaa3e82f23e0..2549c6df507e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -364,7 +364,8 @@ private static boolean isValidWALRootDir(Path walDir, final Configuration c) thr if (!qualifiedWalDir.equals(rootDir)) { if (qualifiedWalDir.toString().startsWith(rootDir.toString() + "/")) { throw new IllegalStateException("Illegal WAL directory specified. " + - "WAL directories are not permitted to be under the root directory if set."); + "WAL directories are not permitted to be under root directory: rootDir=" + + rootDir.toString() + ", qualifiedWALDir=" + qualifiedWalDir); } } return true; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 7c4be83a73e9..b410fc22d891 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,24 +22,21 @@ import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; +import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; @@ -49,6 +46,9 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL} files. @@ -77,10 +77,6 @@ public WALSplit() {} * Represent an WALSplit, i.e. a single WAL file. * Start- and EndTime are managed by the split, so that WAL files can be * filtered before WALEdits are passed to the mapper(s). - * @param logFileName - * @param fileSize - * @param startTime - * @param endTime */ public WALSplit(String logFileName, long fileSize, long startTime, long endTime) { this.logFileName = logFileName; @@ -186,7 +182,9 @@ private void seek() throws IOException { @Override public boolean nextKeyValue() throws IOException, InterruptedException { - if (reader == null) return false; + if (reader == null) { + return false; + } this.currentPos = reader.getPosition(); Entry temp; long i = -1; @@ -204,7 +202,9 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } while (temp != null && temp.getKey().getWriteTime() < startTime); if (temp == null) { - if (i > 0) LOG.info("Skipped " + i + " entries."); + if (i > 0) { + LOG.info("Skipped " + i + " entries."); + } LOG.info("Reached end of file."); return false; } else if (i > 0) { @@ -242,7 +242,9 @@ public float getProgress() throws IOException, InterruptedException { @Override public void close() throws IOException { LOG.info("Closing reader"); - if (reader != null) this.reader.close(); + if (reader != null) { + this.reader.close(); + } } } @@ -301,40 +303,56 @@ private Path[] getInputPaths(Configuration conf) { inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); } + /** + * @param startTime If file looks like it has a timestamp in its name, we'll check if newer + * or equal to this value else we will filter out the file. If name does not + * seem to have a timestamp, we will just return it w/o filtering. + * @param endTime If file looks like it has a timestamp in its name, we'll check if older or equal + * to this value else we will filter out the file. If name does not seem to + * have a timestamp, we will just return it w/o filtering. 
+ */ private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException { List result = new ArrayList<>(); LOG.debug("Scanning " + dir.toString() + " for WAL files"); - RemoteIterator iter = fs.listLocatedStatus(dir); - if (!iter.hasNext()) return Collections.emptyList(); + if (!iter.hasNext()) { + return Collections.emptyList(); + } while (iter.hasNext()) { LocatedFileStatus file = iter.next(); if (file.isDirectory()) { - // recurse into sub directories + // Recurse into sub directories result.addAll(getFiles(fs, file.getPath(), startTime, endTime)); } else { - String name = file.getPath().toString(); - int idx = name.lastIndexOf('.'); - if (idx > 0) { - try { - long fileStartTime = Long.parseLong(name.substring(idx+1)); - if (fileStartTime <= endTime) { - LOG.info("Found: " + file); - result.add(file); - } - } catch (NumberFormatException x) { - idx = 0; - } - } - if (idx == 0) { - LOG.warn("File " + name + " does not appear to be an WAL file. Skipping..."); - } + addFile(result, file, startTime, endTime); } } + // TODO: These results should be sorted? Results could be content of recovered.edits directory + // -- null padded increasing numeric -- or a WAL file w/ timestamp suffix or timestamp and + // then meta suffix. See AbstractFSWALProvider#WALStartTimeComparator return result; } + static void addFile(List result, LocatedFileStatus lfs, long startTime, + long endTime) { + long timestamp = WAL.getTimestamp(lfs.getPath().getName()); + if (timestamp > 0) { + // Looks like a valid timestamp. + if (timestamp <= endTime && timestamp >= startTime) { + LOG.info("Found {}", lfs.getPath()); + result.add(lfs); + } else { + LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), + startTime, Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); + } + } else { + // If no timestamp, add it regardless. + LOG.info("Found (no-timestamp!) {}", lfs); + result.add(lfs); + } + } + @Override public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 5b1aac654414..a47a12fffb5a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; @@ -239,6 +238,7 @@ protected boolean filter(Context context, final Cell cell) { super.cleanup(context); } + @SuppressWarnings("checkstyle:EmptyBlock") @Override public void setup(Context context) throws IOException { String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY); @@ -377,17 +377,21 @@ private void usage(final String errorMsg) { System.err.println(" directory of WALs to replay."); System.err.println(" comma separated list of tables. 
If no tables specified,"); System.err.println(" all are imported (even hbase:meta if present)."); - System.err.println(" WAL entries can be mapped to a new set of tables by passing"); - System.err.println(" , a comma separated list of target tables."); - System.err.println(" If specified, each table in must have a mapping."); + System.err.println(" WAL entries can be mapped to a new set of tables by " + + "passing"); + System.err.println(" , a comma separated list of target " + + "tables."); + System.err.println(" If specified, each table in must have a " + + "mapping."); System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println(" Only one table can be specified, and no mapping allowed!"); System.err.println("To specify a time range, pass:"); System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); - System.err.println(" The start and the end date of timerange. The dates can be expressed"); - System.err.println(" in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format."); + System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); + System.err.println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + + "format."); System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java new file mode 100644 index 000000000000..8d21c394d554 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
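A hedged example of a WALPlayer invocation that replays only an inclusive time window (paths and table name illustrative; either milliseconds-since-epoch or the date format may be used):

----
$ ./bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer \
    -Dwal.start.time=2009-02-13T23:32:30.12 \
    -Dwal.end.time=2009-02-14T00:00:00.00 \
    hdfs://example.org:8020/hbase/WALs myTable
----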
+ */ +package org.apache.hadoop.hbase.mapreduce; + +import static org.junit.Assert.assertEquals; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MapReduceTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +@Category({ MapReduceTests.class, SmallTests.class}) +public class TestWALInputFormat { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWALInputFormat.class); + + /** + * Test the primitive start/end time filtering. + */ + @Test + public void testAddFile() { + List lfss = new ArrayList<>(); + LocatedFileStatus lfs = Mockito.mock(LocatedFileStatus.class); + long now = System.currentTimeMillis(); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name." + now)); + WALInputFormat.addFile(lfss, lfs, now, now); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 1, now - 1); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 2, now - 1); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 2, now); + assertEquals(2, lfss.size()); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, now); + assertEquals(3, lfss.size()); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(4, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now, now + 2); + assertEquals(5, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now + 1, now + 2); + assertEquals(5, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name")); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(6, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name.123")); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(7, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name." + now + ".meta")); + WALInputFormat.addFile(lfss, lfs, now, now); + assertEquals(8, lfss.size()); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 432aff1dd044..d60a3d9b712c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,8 +24,8 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; - import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.PrintStream; import java.util.ArrayList; import org.apache.hadoop.conf.Configuration; @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.WALPlayer.WALKeyValueMapper; +import org.apache.hadoop.hbase.regionserver.TestRecoveredEdits; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; @@ -73,7 +74,6 @@ */ @Category({MapReduceTests.class, LargeTests.class}) public class TestWALPlayer { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALPlayer.class); @@ -91,7 +91,7 @@ public class TestWALPlayer { @BeforeClass public static void beforeClass() throws Exception { - conf= TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.createRootDir(); walRootDir = TEST_UTIL.createWALRootDir(); fs = CommonFSUtils.getRootDirFileSystem(conf); @@ -106,9 +106,32 @@ public static void afterClass() throws Exception { logFs.delete(walRootDir, true); } + /** + * Test that WALPlayer can replay recovered.edits files. + */ + @Test + public void testPlayingRecoveredEdit() throws Exception { + TableName tn = TableName.valueOf(TestRecoveredEdits.RECOVEREDEDITS_TABLENAME); + TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); + // Copy testing recovered.edits file that is over under hbase-server test resources + // up into a dir in our little hdfs cluster here. + String hbaseServerTestResourcesEdits = System.getProperty("test.build.classes") + + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + assertTrue(new File(hbaseServerTestResourcesEdits).exists()); + FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); + // Target dir. + Path targetDir = new Path("edits").makeQualified(dfs.getUri(), dfs.getHomeDirectory()); + assertTrue(dfs.mkdirs(targetDir)); + dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); + assertEquals(0, + ToolRunner.run(new WALPlayer(this.conf), new String [] {targetDir.toString()})); + // I don't know how many edits are in this file for this table... so just check more than 1. + assertTrue(TEST_UTIL.countRows(tn) > 0); + } + /** * Simple end-to-end test - * @throws Exception */ @Test public void testWALPlayer() throws Exception { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 21a43bd6b47f..40e7f37147a7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - import java.util.List; import java.util.NavigableMap; import java.util.TreeMap; @@ -42,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -123,8 +123,7 @@ public static void tearDownAfterClass() throws Exception { } /** - * Test partial reads from the log based on passed time range - * @throws Exception + * Test partial reads from the WALs based on passed time range. */ @Test public void testPartialRead() throws Exception { @@ -140,6 +139,7 @@ public void testPartialRead() throws Exception { edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts+1, value)); log.appendData(info, getWalKeyImpl(ts+1, scopes), edit); log.sync(); + Threads.sleep(10); LOG.info("Before 1st WAL roll " + log.toString()); log.rollWriter(); LOG.info("Past 1st WAL roll " + log.toString()); @@ -164,26 +164,29 @@ public void testPartialRead() throws Exception { jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString()); jobConf.setLong(WALInputFormat.END_TIME_KEY, ts); - // only 1st file is considered, and only its 1st entry is used + // Only 1st file is considered, and only its 1st entry is in-range. List splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); - jobConf.setLong(WALInputFormat.START_TIME_KEY, ts+1); jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1+1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - // both files need to be considered assertEquals(2, splits.size()); - // only the 2nd entry from the 1st file is used - testSplit(splits.get(0), Bytes.toBytes("2")); - // only the 1nd entry from the 2nd file is used + // Both entries from first file are in-range. + testSplit(splits.get(0), Bytes.toBytes("1"), Bytes.toBytes("2")); + // Only the 1st entry from the 2nd file is in-range. testSplit(splits.get(1), Bytes.toBytes("3")); + + jobConf.setLong(WALInputFormat.START_TIME_KEY, ts + 1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1 + 1); + splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); + assertEquals(1, splits.size()); + // Only the 1st entry from the 2nd file is in-range. 
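The keys exercised by this test are the same ones a job driver would set; a minimal sketch of restricting WALInputFormat to an inclusive window:

----
JobConf jobConf = new JobConf(conf);
jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
jobConf.setLong(WALInputFormat.START_TIME_KEY, ts);  // inclusive lower bound
jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1);   // inclusive upper bound
----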
+ testSplit(splits.get(0), Bytes.toBytes("3")); } /** * Test basic functionality - * @throws Exception */ @Test public void testWALRecordReader() throws Exception { @@ -234,11 +237,7 @@ public void testWALRecordReader() throws Exception { jobConf.setLong(WALInputFormat.END_TIME_KEY, Long.MAX_VALUE); jobConf.setLong(WALInputFormat.START_TIME_KEY, thirdTs); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - // both logs need to be considered - assertEquals(2, splits.size()); - // but both readers skip all edits - testSplit(splits.get(0)); - testSplit(splits.get(1)); + assertTrue(splits.isEmpty()); } /** @@ -346,4 +345,4 @@ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) } reader.close(); } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 6f9c87b00518..e7bdb0bf6789 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.wal; -import static org.apache.commons.lang3.StringUtils.isNumeric; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; @@ -439,17 +438,12 @@ public int compare(Path o1, Path o2) { * @return start time */ private static long getTS(Path p) { - String name = p.getName(); - String [] splits = name.split("\\."); - String ts = splits[splits.length - 1]; - if (!isNumeric(ts)) { - // Its a '.meta' or a '.syncrep' suffix. - ts = splits[splits.length - 2]; - } - return Long.parseLong(ts); + return WAL.getTimestamp(p.getName()); } } + + public static boolean isArchivedLogFile(Path p) { String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; return p.toString().contains(oldLog); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 747b2770d457..20379fd7fe9e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -32,6 +32,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import static org.apache.commons.lang3.StringUtils.isNumeric; /** * A Write Ahead Log (WAL) provides service for reading, writing waledits. This interface provides @@ -299,4 +300,32 @@ public String toString() { return this.key + "=" + this.edit; } } + + /** + * Split a WAL filename to get a start time. WALs usually have the time we start writing to them + * as part of their name, usually the suffix. Sometimes there will be an extra suffix as when it + * is a WAL for the meta table. For example, WALs might look like this + * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the + * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a + * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have + * no timestamp on it. For example the recovered.edits files are WALs but are named in ascending + * order. Here is an example: 0000000000000016310. Allow for this. + * @param name Name of the WAL file. + * @return Timestamp or -1. 
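A few name-to-result pairs implied by this contract (sketch):

----
WAL.getTimestamp("10.20.20.171%3A60020.1277499063250");      // 1277499063250
WAL.getTimestamp("10.20.20.171%3A60020.1277499063250.meta"); // 1277499063250
WAL.getTimestamp("0000000000000016310");                     // -1: recovered.edits-style name
----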
+ */ + public static long getTimestamp(String name) { + String [] splits = name.split("\\."); + if (splits.length <= 1) { + return -1; + } + String timestamp = splits[splits.length - 1]; + if (!isNumeric(timestamp)) { + // Its a '.meta' or a '.syncrep' suffix. + timestamp = splits[splits.length - 2]; + if (!isNumeric(timestamp)) { + return -1; + } + } + return Long.parseLong(timestamp); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java index ba1d17dd4c6b..c287e02dc97d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -78,6 +78,32 @@ public class TestRecoveredEdits { @Rule public TestName testName = new TestName(); + /** + * Path to a recovered.edits file in hbase-server test resources folder. + * This is a little fragile getting this path to a file of 10M of edits. + */ + @SuppressWarnings("checkstyle:VisibilityModifier") + public static final Path RECOVEREDEDITS_PATH = new Path( + System.getProperty("test.build.classes", "target/test-classes"), + "0000000000000016310"); + + /** + * Name of table referenced by edits in the recovered.edits file. + */ + public static final String RECOVEREDEDITS_TABLENAME = "IntegrationTestBigLinkedList"; + + /** + * Column family referenced by edits in the recovered.edits file. + */ + public static final byte [] RECOVEREDEDITS_COLUMNFAMILY = Bytes.toBytes("meta"); + public static final byte[][] RECOVEREDITS_COLUMNFAMILY_ARRAY = + new byte[][] {RECOVEREDEDITS_COLUMNFAMILY}; + public static final ColumnFamilyDescriptor RECOVEREDEDITS_CFD = + ColumnFamilyDescriptorBuilder.newBuilder(RECOVEREDEDITS_COLUMNFAMILY).build(); + + /** + * Name of table mentioned edits from recovered.edits + */ @BeforeClass public static void setUpBeforeClass() throws Exception { blockCache = BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration()); @@ -102,13 +128,9 @@ private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy po // Set it so we flush every 1M or so. Thats a lot. conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(policy).toLowerCase()); - // The file of recovered edits has a column family of 'meta'. 
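The constants introduced above let the hbase-mapreduce module share this fixture; for example, TestWALPlayer#testPlayingRecoveredEdit (shown earlier) sets up its table as:

----
TableName tn = TableName.valueOf(TestRecoveredEdits.RECOVEREDEDITS_TABLENAME);
TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY);
----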
- final String columnFamily = "meta"; - byte[][] columnFamilyAsByteArray = new byte[][] { Bytes.toBytes(columnFamily) }; - TableDescriptor tableDescriptor = TableDescriptorBuilder - .newBuilder(TableName.valueOf(testName.getMethodName())).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(columnFamily)).build()) - .build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder. + newBuilder(TableName.valueOf(testName.getMethodName())). + setColumnFamily(RECOVEREDEDITS_CFD) .build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); final String encodedRegionName = hri.getEncodedName(); Path hbaseRootDir = TEST_UTIL.getDataTestDir(); @@ -123,24 +145,20 @@ private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy po HRegion region = HBaseTestingUtility .createRegionAndWAL(hri, hbaseRootDir, conf, tableDescriptor, blockCache); assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName()); - List storeFiles = region.getStoreFileList(columnFamilyAsByteArray); + List storeFiles = region.getStoreFileList(RECOVEREDITS_COLUMNFAMILY_ARRAY); // There should be no store files. assertTrue(storeFiles.isEmpty()); region.close(); Path regionDir = FSUtils.getRegionDirFromRootDir(hbaseRootDir, hri); Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regionDir); - // This is a little fragile getting this path to a file of 10M of edits. - Path recoveredEditsFile = new Path( - System.getProperty("test.build.classes", "target/test-classes"), - "0000000000000016310"); // Copy this file under the region's recovered.edits dir so it is replayed on reopen. - Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName()); - fs.copyToLocalFile(recoveredEditsFile, destination); + Path destination = new Path(recoveredEditsDir, RECOVEREDEDITS_PATH.getName()); + fs.copyToLocalFile(RECOVEREDEDITS_PATH, destination); assertTrue(fs.exists(destination)); // Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay. region = HRegion.openHRegion(region, null); assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName()); - storeFiles = region.getStoreFileList(columnFamilyAsByteArray); + storeFiles = region.getStoreFileList(RECOVEREDITS_COLUMNFAMILY_ARRAY); // Our 0000000000000016310 is 10MB. Most of the edits are for one region. Lets assume that if // we flush at 1MB, that there are at least 3 flushed files that are there because of the // replay of edits. @@ -150,19 +168,16 @@ private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy po assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10); } // Now verify all edits made it into the region. - int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region); + int count = verifyAllEditsMadeItIn(fs, conf, RECOVEREDEDITS_PATH, region); + assertTrue(count > 0); LOG.info("Checked " + count + " edits made it in"); } /** - * @param fs - * @param conf - * @param edits - * @param region * @return Return how many edits seen. - * @throws IOException */ - private int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf, + // Used by TestWALPlayer over in hbase-mapreduce too. 
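Since the helper defined just below is now public static, other modules can call it directly; a sketch of the cross-module use (variable names illustrative):

----
int count = TestRecoveredEdits.verifyAllEditsMadeItIn(fs, conf, recoveredEditsPath, region);
assertTrue("Expected at least one replayed edit", count > 0);
----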
+ public static int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf, final Path edits, final HRegion region) throws IOException { int count = 0; // Read all cells from recover edits diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index 6ea23655d3c7..d27c5d6e488f 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -424,32 +424,11 @@ See <>. For bulk replaying WAL files or _recovered.edits_ files, see <>. For reading/verifying individual files, read on. -[[hlog_tool]] -==== FSHLog tool - -The main method on `FSHLog` offers manual split and dump facilities. -Pass it WALs or the product of a split, the content of the _recovered.edits_. -directory. - -You can get a textual dump of a WAL file content by doing the following: - ----- - $ ./bin/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --dump hdfs://example.org:8020/hbase/WALs/example.org,60020,1283516293161/10.10.21.10%3A60020.1283973724012 ----- - -The return code will be non-zero if there are any issues with the file so you can test wholesomeness of file by redirecting `STDOUT` to `/dev/null` and testing the program return. - -Similarly you can force a split of a log file directory by doing: - ----- - $ ./bin/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --split hdfs://example.org:8020/hbase/WALs/example.org,60020,1283516293161/ ----- - [[hlog_tool.prettyprint]] -===== WALPrettyPrinter +==== WALPrettyPrinter -The `WALPrettyPrinter` is a tool with configurable options to print the contents of a WAL. -You can invoke it via the HBase cli with the 'wal' command. +The `WALPrettyPrinter` is a tool with configurable options to print the contents of a WAL +or a _recovered.edits_ file. You can invoke it via the HBase cli with the 'wal' command. ---- $ ./bin/hbase wal hdfs://example.org:8020/hbase/WALs/example.org,60020,1283516293161/10.10.21.10%3A60020.1283973724012 @@ -904,7 +883,10 @@ The output can optionally be mapped to another set of tables. WALPlayer can also generate HFiles for later bulk importing, in that case only a single table and no mapping can be specified. -.WALPrettyPrinter/FSHLog Tool +Finally, you can use WALPlayer to replay the content of a Regions `recovered.edits` directory (the files under +`recovered.edits` directory have the same format as WAL files). + +.WALPrettyPrinter [NOTE] ==== To read or verify single WAL files or _recovered.edits_ files, since they share the WAL format, @@ -945,8 +927,8 @@ To generate HFiles to bulk load instead of loading HBase directly, pass: To specify a time range, pass: -Dwal.start.time=[date|ms] -Dwal.end.time=[date|ms] - The start and the end date of timerange. The dates can be expressed - in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format. + The start and the end date of timerange (inclusive). The dates can be + expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS format. E.g. 
1234567890120 or 2009-02-13T23:32:30.12 Other options: -Dmapreduce.job.name=jobName From f34ba064688e8f992639c24180488a937f2113bc Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 10 Oct 2020 08:22:12 +0800 Subject: [PATCH 120/769] HBASE-25163 Increase the timeout value for nightly jobs (#2512) Signed-off-by: stack Signed-off-by: Jan Hentschel Signed-off-by: Viraj Jasani --- dev-support/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index c250dcefe604..01d50197fbc5 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -25,7 +25,7 @@ pipeline { } options { buildDiscarder(logRotator(numToKeepStr: '15')) - timeout (time: 9, unit: 'HOURS') + timeout (time: 16, unit: 'HOURS') timestamps() skipDefaultCheckout() disableConcurrentBuilds() From b81520709680140305628c40ee41f01389155de3 Mon Sep 17 00:00:00 2001 From: XinSun Date: Sat, 10 Oct 2020 16:09:12 +0800 Subject: [PATCH 121/769] HBASE-25171 Remove ZNodePaths.namespaceZNode (#2526) Signed-off-by: Duo Zhang --- .../java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index a0065a9e9cbf..71936b9f36d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -74,8 +74,6 @@ public class ZNodePaths { public final String regionNormalizerZNode; // znode containing the state of all switches, currently there are split and merge child node. public final String switchZNode; - // znode containing namespace descriptors - public final String namespaceZNode; // znode of indicating master maintenance mode public final String masterMaintZNode; @@ -106,7 +104,6 @@ public ZNodePaths(Configuration conf) { regionNormalizerZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); switchZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); - namespaceZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.namespace", "namespace")); masterMaintZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); replicationZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication")); @@ -134,7 +131,6 @@ public String toString() { .append(", balancerZNode=").append(balancerZNode) .append(", regionNormalizerZNode=").append(regionNormalizerZNode) .append(", switchZNode=").append(switchZNode) - .append(", namespaceZNode=").append(namespaceZNode) .append(", masterMaintZNode=").append(masterMaintZNode) .append(", replicationZNode=").append(replicationZNode) .append(", peersZNode=").append(peersZNode) From c367e91aff0c1ebdefb62bc4eafb03c644a80a49 Mon Sep 17 00:00:00 2001 From: Qi Yu Date: Sat, 10 Oct 2020 16:09:39 +0800 Subject: [PATCH 122/769] HBASE-25162 Make flaky tests run more aggressively (#2525) Change the trigger interval from 12h to 4h Signed-off-by: Duo Zhang --- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index 282b83115883..959ae31a0767 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ 
-21,7 +21,7 @@ pipeline { } } triggers { - cron('H */12 * * *') // Every four hours. See https://jenkins.io/doc/book/pipeline/syntax/#cron-syntax + cron('H H/4 * * *') // Every four hours. See https://jenkins.io/doc/book/pipeline/syntax/#cron-syntax } options { // this should roughly match how long we tell the flaky dashboard to look at From accd9750aa217c40e9db641c53905b8f4bb7e66d Mon Sep 17 00:00:00 2001 From: ramkrish86 Date: Sun, 11 Oct 2020 10:46:06 +0530 Subject: [PATCH 123/769] HBASE-25065 WAL archival to be done by a separate thread (#2501) * HBASE-25065 WAL archival can be batched/throttled and also done by a separate thread * Fix checkstyle issues * Address review comments * checkstyle comments * Addressing final review comments Signed-off-by: Michael Stack --- .../hbase/master/region/MasterRegion.java | 2 +- .../hbase/regionserver/HRegionServer.java | 2 +- .../hbase/regionserver/wal/AbstractFSWAL.java | 69 +++++++++++++++++-- .../hbase/regionserver/wal/AsyncFSWAL.java | 15 +++- .../hadoop/hbase/regionserver/wal/FSHLog.java | 30 ++++++-- .../hbase/wal/AbstractFSWALProvider.java | 8 ++- .../hadoop/hbase/wal/AsyncFSWALProvider.java | 8 +-- .../hadoop/hbase/wal/DisabledWALProvider.java | 5 +- .../hadoop/hbase/wal/FSHLogProvider.java | 6 +- .../hbase/wal/RegionGroupingProvider.java | 9 ++- .../hbase/wal/SyncReplicationWALProvider.java | 7 +- .../apache/hadoop/hbase/wal/WALFactory.java | 19 +++-- .../apache/hadoop/hbase/wal/WALProvider.java | 5 +- .../regionserver/TestFailedAppendAndSync.java | 44 +++++++++++- .../wal/AbstractTestLogRolling.java | 7 +- .../hadoop/hbase/wal/IOTestProvider.java | 4 +- .../hadoop/hbase/wal/TestWALFactory.java | 2 +- 17 files changed, 199 insertions(+), 43 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java index 81da59d6b665..688a5497ddc0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java @@ -301,7 +301,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException params.archivedWalSuffix(), params.rollPeriodMs(), params.flushSize()); walRoller.start(); - WALFactory walFactory = new WALFactory(conf, server.getServerName().toString(), false); + WALFactory walFactory = new WALFactory(conf, server.getServerName().toString(), server, false); Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName()); HRegion region; if (fs.exists(tableDir)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index d6eb45fe65e1..d51eab4ec400 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1906,7 +1906,7 @@ private void setupWALAndReplication() throws IOException { boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster && !LoadBalancer.isMasterCanHostUserRegions(conf); WALFactory factory = - new WALFactory(conf, serverName.toString(), !isMasterNoTableOrSystemTableOnly); + new WALFactory(conf, serverName.toString(), this, !isMasterNoTableOrSystemTableOnly); if (!isMasterNoTableOrSystemTableOnly) { // TODO Replication make assumptions here based on the default filesystem impl Path oldLogDir = new 
Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index d2c624ab446c..ac99ea671e5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -41,6 +41,8 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -53,6 +55,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -84,8 +87,12 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + + + + /** * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS. Only one @@ -185,6 +192,8 @@ public abstract class AbstractFSWAL implements WAL { */ protected final Configuration conf; + protected final Abortable abortable; + /** Listeners that are called on WAL events. 
*/ protected final List listeners = new CopyOnWriteArrayList<>(); @@ -329,6 +338,11 @@ public WalProps(Map encodedName2HighestSequenceId, long logSize) { protected final AtomicBoolean rollRequested = new AtomicBoolean(false); + private final ExecutorService logArchiveExecutor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("WAL-Archiver-%d").build()); + + private final int archiveRetries; + public long getFilenum() { return this.filenum.get(); } @@ -380,10 +394,19 @@ protected AbstractFSWAL(final FileSystem fs, final Path rootDir, final String lo final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, final String suffix) throws FailedLogCloseException, IOException { + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + } + + protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Path rootDir, + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) + throws FailedLogCloseException, IOException { this.fs = fs; this.walDir = new Path(rootDir, logDir); this.walArchiveDir = new Path(rootDir, archiveDir); this.conf = conf; + this.abortable = abortable; if (!fs.exists(walDir) && !fs.mkdirs(walDir)) { throw new IOException("Unable to mkdir " + walDir); @@ -482,6 +505,8 @@ protected SyncFuture initialValue() { this.walTooOldNs = TimeUnit.SECONDS.toNanos(conf.getInt( SURVIVED_TOO_LONG_SEC_KEY, SURVIVED_TOO_LONG_SEC_DEFAULT)); this.useHsync = conf.getBoolean(HRegion.WAL_HSYNC_CONF_KEY, HRegion.DEFAULT_WAL_HSYNC); + archiveRetries = this.conf.getInt("hbase.regionserver.walroll.archive.retries", 0); + } /** @@ -715,11 +740,39 @@ private void cleanOldLogs() throws IOException { regionsBlockingThisWal.clear(); } } + if (logsToArchive != null) { - for (Pair logAndSize : logsToArchive) { - this.totalLogSize.addAndGet(-logAndSize.getSecond()); - archiveLogFile(logAndSize.getFirst()); - this.walFile2Props.remove(logAndSize.getFirst()); + final List> localLogsToArchive = logsToArchive; + // make it async + for (Pair log : localLogsToArchive) { + logArchiveExecutor.execute(() -> { + archive(log); + }); + this.walFile2Props.remove(log.getFirst()); + } + } + } + + protected void archive(final Pair log) { + int retry = 1; + while (true) { + try { + archiveLogFile(log.getFirst()); + totalLogSize.addAndGet(-log.getSecond()); + // successful + break; + } catch (Throwable e) { + if (retry > archiveRetries) { + LOG.error("Failed log archiving for the log {},", log.getFirst(), e); + if (this.abortable != null) { + this.abortable.abort("Failed log archiving", e); + break; + } + } else { + LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, + e); + } + retry++; } } } @@ -732,7 +785,8 @@ public static Path getWALArchivePath(Path archiveDir, Path p) { return new Path(archiveDir, p.getName()); } - private void archiveLogFile(final Path p) throws IOException { + @VisibleForTesting + protected void archiveLogFile(final Path p) throws IOException { Path newPath = getWALArchivePath(this.walArchiveDir, p); // Tell our listeners that a log is going to be archived. 
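How hard the archiver tries before asking the server to abort is bounded by the new knob read in the constructor; a hedged tuning example:

----
// Allow two failed archive attempts per WAL before aborting; the default of 0 fails fast.
conf.setInt("hbase.regionserver.walroll.archive.retries", 2);
----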
if (!this.listeners.isEmpty()) { @@ -907,6 +961,9 @@ public void shutdown() throws IOException { rollWriterLock.lock(); try { doShutdown(); + if (logArchiveExecutor != null) { + logArchiveExecutor.shutdownNow(); + } } finally { rollWriterLock.unlock(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index a40e50335d99..342446098be8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -44,9 +44,11 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput; @@ -60,7 +62,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.channel.Channel; @@ -68,6 +69,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor; + /** * An asynchronous implementation of FSWAL. *

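The Abortable reaches the WAL implementations by riding through WALFactory, as wired in HRegionServer and MasterRegion above; a minimal sketch (the regionServer variable is illustrative):

----
// The third argument is the server-as-Abortable, letting a WAL abort the process
// if asynchronous archival ultimately fails.
WALFactory factory = new WALFactory(conf, serverName.toString(), regionServer, true);
----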
    @@ -206,7 +208,16 @@ public AsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, Configuration conf, List listeners, boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, Class channelClass) throws FailedLogCloseException, IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, + eventLoopGroup, channelClass); + } + + public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDir, + String archiveDir, Configuration conf, List listeners, + boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, + Class channelClass) throws FailedLogCloseException, IOException { + super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, + suffix); this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; Supplier hasConsumerTask; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 001be00d8a11..fe910aa067b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -40,10 +40,12 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.trace.TraceUtil; @@ -62,10 +64,10 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * The default implementation of FSWAL. */ @@ -168,7 +170,7 @@ public class FSHLog extends AbstractFSWAL { private final int waitOnShutdownInSeconds; private final ExecutorService closeExecutor = Executors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Close-WAL-Writer-%d").build()); + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Close-WAL-Writer-%d").build()); /** * Exception handler to pass the disruptor ringbuffer. 
Same as native implementation only it logs @@ -208,11 +210,25 @@ public FSHLog(final FileSystem fs, final Path root, final String logDir, final C this(fs, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null); } + @VisibleForTesting + public FSHLog(final FileSystem fs, Abortable abortable, final Path root, final String logDir, + final Configuration conf) throws IOException { + this(fs, abortable, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, + null); + } + + public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, + final String archiveDir, final Configuration conf, final List listeners, + final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + } + /** * Create an edit log at the given dir location. You should never have to load an * existing log. If there is a log at startup, it should have already been processed and deleted * by the time the WAL object is started up. * @param fs filesystem handle + * @param abortable Abortable - the server here * @param rootDir path to where logs and oldlogs * @param logDir dir where wals are stored * @param archiveDir dir where wals are archived @@ -226,10 +242,12 @@ public FSHLog(final FileSystem fs, final Path root, final String logDir, final C * @param suffix will be url encoded. null is treated as empty. non-empty must start with * {@link org.apache.hadoop.hbase.wal.AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} */ - public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, final List listeners, - final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir, + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) throws IOException { + super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, + suffix); this.minTolerableReplication = conf.getInt(TOLERABLE_LOW_REPLICATION, CommonFSUtils.getDefaultReplication(fs, this.walDir)); this.lowReplicationRollLimit = conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index e7bdb0bf6789..84c94e608168 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.wal; + import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; @@ -29,10 +30,12 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; import java.util.regex.Pattern; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import 
org.apache.hadoop.hbase.client.RegionInfo; @@ -88,6 +91,7 @@ public interface Reader extends WAL.Reader { protected AtomicBoolean initialized = new AtomicBoolean(false); // for default wal provider, logPrefix won't change protected String logPrefix; + protected Abortable abortable; /** * We use walCreateLock to prevent wal recreation in different threads, and also prevent getWALs @@ -102,7 +106,8 @@ public interface Reader extends WAL.Reader { * null */ @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -119,6 +124,7 @@ public void init(WALFactory factory, Configuration conf, String providerId) thro } } logPrefix = sb.toString(); + this.abortable = abortable; doInit(conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java index 062b3688d3e4..3a2ffa7600bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java @@ -65,11 +65,11 @@ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long @Override protected AsyncFSWAL createWAL() throws IOException { - return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf), - getWALDirectoryName(factory.factoryId), + return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null, - eventLoopGroup, channelClass); + META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : null, eventLoopGroup, + channelClass); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 0ff2195eaa04..6e5a0538296c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -25,8 +25,10 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -55,7 +57,8 @@ class DisabledWALProvider implements WALProvider { WAL disabled; @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (null != disabled) { throw new IllegalStateException("WALProvider.init should only be called once."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index 3b91c2475cfe..e64d70f50981 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -67,7 +67,7 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, * Public because of FSHLog. Should be package-private */ public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path, - final boolean overwritable, long blocksize) throws IOException { + final boolean overwritable, long blocksize) throws IOException { // Configuration already does caching for the Class lookup. Class logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, @@ -101,8 +101,8 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, @Override protected FSHLog createWAL() throws IOException { - return new FSHLog(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf), - getWALDirectoryName(factory.factoryId), + return new FSHLog(CommonFSUtils.getWALFileSystem(conf), abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 764d3d521ace..20d043b6ae26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -28,7 +28,9 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; import java.util.stream.Collectors; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; @@ -137,14 +139,17 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, private List listeners = new ArrayList<>(); private String providerId; private Class providerClass; + private Abortable abortable; @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (null != strategy) { throw new IllegalStateException("WALProvider.init should only be called once."); } this.conf = conf; this.factory = factory; + this.abortable = abortable; if (META_WAL_PROVIDER_ID.equals(providerId)) { // do not change the provider id if it is for meta @@ -171,7 +176,7 @@ public void init(WALFactory factory, Configuration conf, String providerId) thro private WALProvider createProvider(String group) throws IOException { WALProvider provider = WALFactory.createProvider(providerClass); provider.init(factory, conf, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group); + META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : group, this.abortable); provider.addWALActionsListener(new MetricsWAL()); return provider; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java index 9859c204649f..001e1a8ea955 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java @@ -35,7 +35,9 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; @@ -108,11 +110,12 @@ public void setPeerInfoProvider(SyncReplicationPeerInfoProvider peerInfoProvider } @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } - provider.init(factory, conf, providerId); + provider.init(factory, conf, providerId, abortable); this.conf = conf; this.factory = factory; Pair> eventLoopGroupAndChannelClass = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index 26b87277a13a..6a5feb0f4181 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -21,9 +21,11 @@ import java.io.InterruptedIOException; import java.util.List; import java.util.concurrent.atomic.AtomicReference; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; @@ -35,7 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -86,6 +87,7 @@ static enum Providers { public static final String WAL_ENABLED = "hbase.regionserver.hlog.enabled"; final String factoryId; + final Abortable abortable; private final WALProvider provider; // The meta updates are written to a different wal. If this // regionserver holds meta regions, then this ref will be non-null. @@ -119,6 +121,7 @@ private WALFactory(Configuration conf) { // this instance can't create wals, just reader/writers. provider = null; factoryId = SINGLETON_ID; + this.abortable = null; } @VisibleForTesting @@ -175,7 +178,7 @@ static WALProvider createProvider(Class clazz) throws IOE public WALFactory(Configuration conf, String factoryId) throws IOException { // default enableSyncReplicationWALProvider is true, only disable SyncReplicationWALProvider // for HMaster or HRegionServer which take system table only. 
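Downstream WALProvider implementations must adapt to the widened init contract; a sketch (class name hypothetical, other interface methods elided):

----
public class CustomWALProvider implements WALProvider {
  private Abortable abortable;

  @Override
  public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable)
      throws IOException {
    // May be null, e.g. when the factory is created for reader/writer use only.
    this.abortable = abortable;
  }
}
----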
See HBASE-19999 - this(conf, factoryId, true); + this(conf, factoryId, null, true); } /** @@ -183,11 +186,12 @@ public WALFactory(Configuration conf, String factoryId) throws IOException { * instances. * @param factoryId a unique identifier for this factory. used i.e. by filesystem implementations * to make a directory + * @param abortable the server associated with this WAL file * @param enableSyncReplicationWALProvider whether wrap the wal provider to a * {@link SyncReplicationWALProvider} */ - public WALFactory(Configuration conf, String factoryId, boolean enableSyncReplicationWALProvider) - throws IOException { + public WALFactory(Configuration conf, String factoryId, Abortable abortable, + boolean enableSyncReplicationWALProvider) throws IOException { // until we've moved reader/writer construction down into providers, this initialization must // happen prior to provider initialization, in case they need to instantiate a reader/writer. timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000); @@ -196,20 +200,21 @@ public WALFactory(Configuration conf, String factoryId, boolean enableSyncReplic AbstractFSWALProvider.Reader.class); this.conf = conf; this.factoryId = factoryId; + this.abortable = abortable; // end required early initialization if (conf.getBoolean(WAL_ENABLED, true)) { WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); if (enableSyncReplicationWALProvider) { provider = new SyncReplicationWALProvider(provider); } - provider.init(this, conf, null); + provider.init(this, conf, null, this.abortable); provider.addWALActionsListener(new MetricsWAL()); this.provider = provider; } else { // special handling of existing configuration behavior. LOG.warn("Running with WAL disabled."); provider = new DisabledWALProvider(); - provider.init(this, conf, factoryId); + provider.init(this, conf, factoryId, null); } } @@ -274,7 +279,7 @@ WALProvider getMetaProvider() throws IOException { clz = getProviderClass(META_WAL_PROVIDER, conf.get(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); } provider = createProvider(clz); - provider.init(this, conf, AbstractFSWALProvider.META_WAL_PROVIDER_ID); + provider.init(this, conf, AbstractFSWALProvider.META_WAL_PROVIDER_ID, this.abortable); provider.addWALActionsListener(new MetricsWAL()); if (metaProvider.compareAndSet(null, provider)) { return provider; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java index c3bd14995077..01c1d11ead70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java @@ -23,7 +23,9 @@ import java.util.List; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @@ -46,7 +48,8 @@ public interface WALProvider { * @param conf may not be null * @param providerId differentiate between providers from one factory. may be null */ - void init(WALFactory factory, Configuration conf, String providerId) throws IOException; + void init(WALFactory factory, Configuration conf, String providerId, Abortable server) + throws IOException; /** * @param region the region which we want to get a WAL for it. 
Could be null. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java index a9ce54845e46..fdf96dab87fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -41,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALProvider.Writer; @@ -107,11 +110,13 @@ String getName() { class DodgyFSLog extends FSHLog { volatile boolean throwSyncException = false; volatile boolean throwAppendException = false; + volatile boolean throwArchiveException = false; + final AtomicLong rolls = new AtomicLong(0); - public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) + public DodgyFSLog(FileSystem fs, Server server, Path root, String logDir, Configuration conf) throws IOException { - super(fs, root, logDir, conf); + super(fs, server, root, logDir, conf); } @Override @@ -122,6 +127,18 @@ public Map> rollWriter(boolean force) return regions; } + @Override + protected void archiveLogFile(Path p) throws IOException { + if (throwArchiveException) { + throw new IOException("throw archival exception"); + } + } + + @Override + protected void archive(Pair localLogsToArchive) { + super.archive(localLogsToArchive); + } + @Override protected Writer createWriterInstance(Path path) throws IOException { final Writer w = super.createWriterInstance(path); @@ -176,7 +193,7 @@ public void testLockupAroundBadAssignSync() throws IOException { // the test. 
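    // A hedged sketch of the archive path this test exercises further down (the Pair
    // generics were elided in this listing; Pair<Path, Long> is assumed from the
    // archive(Pair) override above):
    //   dodgyWAL.throwArchiveException = true;
    //   dodgyWAL.archive(new Pair<>(new Path("/a/b/"), 100L));
    //   // the failed archival must escalate to Server.abort, which the Mockito
    //   // verification below confirms with atLeast(2)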
FileSystem fs = FileSystem.get(CONF); Path rootDir = new Path(dir + getName()); - DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF); + DodgyFSLog dodgyWAL = new DodgyFSLog(fs, (Server)services, rootDir, getName(), CONF); dodgyWAL.init(); LogRoller logRoller = new LogRoller(services); logRoller.addWAL(dodgyWAL); @@ -256,6 +273,27 @@ public void testLockupAroundBadAssignSync() throws IOException { Threads.sleep(1); } } + + try { + dodgyWAL.throwAppendException = false; + dodgyWAL.throwSyncException = false; + dodgyWAL.throwArchiveException = true; + Pair pair = new Pair(); + pair.setFirst(new Path("/a/b/")); + pair.setSecond(100L); + dodgyWAL.archive(pair); + } catch (Throwable ioe) { + } + while (true) { + try { + // one more abort needs to be called + Mockito.verify(services, Mockito.atLeast(2)).abort(Mockito.anyString(), + (Throwable) Mockito.anyObject()); + break; + } catch (WantedButNotInvoked t) { + Threads.sleep(1); + } + } } finally { // To stop logRoller, its server has to say it is stopped. Mockito.when(services.isStopped()).thenReturn(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java index 4c19aa0a8244..6e2059d9f30b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java @@ -175,10 +175,15 @@ public void testLogRollOnNothingWritten() throws Exception { } } - private void assertLogFileSize(WAL log) { + private void assertLogFileSize(WAL log) throws InterruptedException { if (AbstractFSWALProvider.getNumRolledLogFiles(log) > 0) { assertTrue(AbstractFSWALProvider.getLogFileSize(log) > 0); } else { + for (int i = 0; i < 10; i++) { + if (AbstractFSWALProvider.getLogFileSize(log) != 0) { + Thread.sleep(10); + } + } assertEquals(0, AbstractFSWALProvider.getLogFileSize(log)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java index d062c77cb336..ecbd0432be18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; // imports for things that haven't moved from regionserver.wal yet. 
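A brief sketch of what the widened init signature enables for callers (illustrative only; the variable names below are not from the patch — only the WALFactory and WALProvider.init signatures are):

    Abortable loggingAbortable = new Abortable() {
      private volatile boolean aborted = false;
      @Override
      public void abort(String why, Throwable e) {
        // a real RegionServer would begin shutdown here; this sketch only records it
        aborted = true;
      }
      @Override
      public boolean isAborted() {
        return aborted;
      }
    };
    // providers created by this factory now receive the Abortable via init(...)
    WALFactory walFactory = new WALFactory(conf, "wals-demo", loggingAbortable, true);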
@@ -99,7 +100,8 @@ private enum AllowedOperations { * null */ @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index a899bdcb4538..656932bc117c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -687,7 +687,7 @@ public void testWALProviders() throws IOException { assertEquals(wrappedWALProvider.getClass(), walFactory.getMetaProvider().getClass()); // if providers are not set and do not enable SyncReplicationWALProvider - walFactory = new WALFactory(conf, this.currentServername.toString(), false); + walFactory = new WALFactory(conf, this.currentServername.toString(), null, false); assertEquals(walFactory.getWALProvider().getClass(), walFactory.getMetaProvider().getClass()); } From 5a474449c160b15a4a65b869df859d1027879136 Mon Sep 17 00:00:00 2001 From: bsglz <18031031@qq.com> Date: Sun, 11 Oct 2020 19:12:56 +0800 Subject: [PATCH 124/769] HBASE-24840 Avoid shows closing region task when create table (#2226) --- .../apache/hadoop/hbase/monitoring/TaskMonitor.java | 10 ++++++++-- .../org/apache/hadoop/hbase/regionserver/HRegion.java | 10 ++++++++-- .../apache/hadoop/hbase/util/ModifyRegionUtils.java | 2 +- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java index 1bde91553628..d2edaa8b1d84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java @@ -87,8 +87,12 @@ public static synchronized TaskMonitor get() { } return instance; } - + public synchronized MonitoredTask createStatus(String description) { + return createStatus(description, false); + } + + public synchronized MonitoredTask createStatus(String description, boolean ignore) { MonitoredTask stat = new MonitoredTaskImpl(); stat.setDescription(description); MonitoredTask proxy = (MonitoredTask) Proxy.newProxyInstance( @@ -99,7 +103,9 @@ public synchronized MonitoredTask createStatus(String description) { if (tasks.isFull()) { purgeExpiredTasks(); } - tasks.add(pair); + if (!ignore) { + tasks.add(pair); + } return proxy; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index a208d9330042..57a1e1f5de93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -1578,6 +1578,10 @@ public Map> close() throws IOException { */ public static final long MAX_FLUSH_PER_CHANGES = 1000000000; // 1G + public Map> close(boolean abort) throws IOException { + return close(abort, false); + } + /** * Close down this HRegion. Flush the cache unless abort parameter is true, * Shut down each HStore, don't service any more calls. 
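A usage sketch of the new ignore flag threaded from TaskMonitor through HRegion.close (hypothetical caller; only the two-argument createStatus overload comes from this patch): passing true keeps a short-lived internal close off the task list while still returning a usable MonitoredTask proxy.

    MonitoredTask status =
        TaskMonitor.get().createStatus("Closing region during table creation", true);
    status.setStatus("Waiting for close lock");  // status updates still work,
    status.markComplete("Closed");               // the task just never shows in the UI list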
@@ -1586,6 +1590,7 @@ public Map> close() throws IOException { * time-sensitive thread. * * @param abort true if server is aborting (only during testing) + * @param ignoreStatus true if ignore the status (wont be showed on task list) * @return Vector of all the storage files that the HRegion's component * HStores make use of. It's a list of StoreFile objects. Can be null if * we are not to close at this time or we are already closed. @@ -1595,12 +1600,13 @@ public Map> close() throws IOException { * because a Snapshot was not properly persisted. The region is put in closing mode, and the * caller MUST abort after this. */ - public Map> close(boolean abort) throws IOException { + public Map> close(boolean abort, boolean ignoreStatus) + throws IOException { // Only allow one thread to close at a time. Serialize them so dual // threads attempting to close will run up against each other. MonitoredTask status = TaskMonitor.get().createStatus( "Closing region " + this.getRegionInfo().getEncodedName() + - (abort ? " due to abort" : "")); + (abort ? " due to abort" : ""), ignoreStatus); status.enableStatusJournal(true); status.setStatus("Waiting for close lock"); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index b4e586392cf2..a3a0c7b23a63 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -186,7 +186,7 @@ public static RegionInfo createRegion(final Configuration conf, final Path rootD } } finally { // 3. Close the new region to flush to disk. Close log file too. - region.close(); + region.close(false, true); } return region.getRegionInfo(); } From c8c860c906d1deb07ef311b896fd3b63379ae78a Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Sun, 11 Oct 2020 06:52:11 -0700 Subject: [PATCH 125/769] HBASE-25156 TestMasterFailover.testSimpleMasterFailover is flaky (ADDENDUM) (#2529) Signed-off-by: Duo Zhang --- .../java/org/apache/hadoop/hbase/master/TestMasterFailover.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index 1e37fcb52b58..2be53a6d26bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -129,7 +129,7 @@ public void testSimpleMasterFailover() throws Exception { // wait for the active master to acknowledge loss of the backup from ZK final HMaster activeFinal = active; TEST_UTIL.waitFor( - TimeUnit.SECONDS.toMillis(30), () -> activeFinal.getBackupMasters().size() == 1); + TimeUnit.MINUTES.toMillis(5), () -> activeFinal.getBackupMasters().size() == 1); // Check that ClusterStatus reports the correct active and backup masters assertNotNull(active); From 3d1aaa66324c46b432e546b4dfa5bdf5191224ed Mon Sep 17 00:00:00 2001 From: niuyulin Date: Sun, 11 Oct 2020 10:11:32 -0500 Subject: [PATCH 126/769] HBASE-25175 Remove the constructors of HBaseConfiguration (#2530) Co-authored-by: niuyulin Signed-off-by: Jan Hentschel --- .../hadoop/hbase/HBaseConfiguration.java | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 67de5fb3a21b..70467f08aa01 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -36,36 +36,6 @@ public class HBaseConfiguration extends Configuration { private static final Logger LOG = LoggerFactory.getLogger(HBaseConfiguration.class); - /** - * Instantiating HBaseConfiguration() is deprecated. Please use - * HBaseConfiguration#create() to construct a plain Configuration - * @deprecated since 0.90.0. Please use {@link #create()} instead. - * @see #create() - * @see HBASE-2036 - */ - @Deprecated - public HBaseConfiguration() { - //TODO:replace with private constructor, HBaseConfiguration should not extend Configuration - super(); - addHbaseResources(this); - LOG.warn("instantiating HBaseConfiguration() is deprecated. Please use" - + " HBaseConfiguration#create() to construct a plain Configuration"); - } - - /** - * Instantiating HBaseConfiguration() is deprecated. Please use - * HBaseConfiguration#create(conf) to construct a plain Configuration - * @deprecated since 0.90.0. Please use {@link #create(Configuration)} instead. - * @see #create(Configuration) - * @see HBASE-2036 - */ - @Deprecated - public HBaseConfiguration(final Configuration c) { - //TODO:replace with private constructor - this(); - merge(this, c); - } - private static void checkDefaultsVersion(Configuration conf) { if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return; String defaultsVersion = conf.get("hbase.defaults.for.version"); From 8eea05235929a16706a83b759d98f9336f10fd49 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Sun, 11 Oct 2020 22:02:37 -0500 Subject: [PATCH 127/769] HBASE-25093 the RSGroupBasedLoadBalancer#retainAssignment throws NPE (#2450) Signed-off-by: Duo Zhang --- .../favored/FavoredNodeLoadBalancer.java | 3 +++ .../hadoop/hbase/master/LoadBalancer.java | 5 ++-- .../master/assignment/AssignmentManager.java | 6 +---- .../master/balancer/BaseLoadBalancer.java | 27 +++++++++++-------- .../balancer/FavoredStochasticBalancer.java | 10 ++++--- .../rsgroup/RSGroupBasedLoadBalancer.java | 23 ++++++++-------- .../apache/hadoop/hbase/TestZooKeeper.java | 3 +++ 7 files changed, 45 insertions(+), 32 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index 8cde76e07c60..60a2c6cae13f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -22,12 +22,14 @@ import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.SECONDARY; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.TERTIARY; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -161,6 +163,7 @@ public List balanceTable(TableName tableName, } @Override + @NonNull public Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { Map> assignmentMap; diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index b7ec1a3aa1bc..90cb3946f8b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.hbase.master; -import edu.umd.cs.findbugs.annotations.Nullable; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.List; import java.util.Map; @@ -110,6 +110,7 @@ List balanceTable(TableName tableName, * Perform a Round Robin assignment of regions. * @return Map of servername to regioninfos */ + @NonNull Map> roundRobinAssignment(List regions, List servers) throws IOException; @@ -117,7 +118,7 @@ Map> roundRobinAssignment(List regions, * Assign regions to the previously hosting region server * @return List of plans */ - @Nullable + @NonNull Map> retainAssignment(Map regions, List servers) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index fb64514a3377..f23d17026f3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -2165,12 +2165,8 @@ private void acceptPlan(final HashMap regions, final ProcedureEvent[] events = new ProcedureEvent[regions.size()]; final long st = System.currentTimeMillis(); - if (plan == null) { - throw new HBaseIOException("unable to compute plans for regions=" + regions.size()); - } - if (plan.isEmpty()) { - return; + throw new HBaseIOException("unable to compute plans for regions=" + regions.size()); } int evcount = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 6a27a6a05680..a47bff26a090 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.master.balancer; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -1132,11 +1133,9 @@ protected List balanceMasterRegions(Map * If master is configured to carry system tables only, in here is * where we figure what to assign it. */ + @NonNull protected Map> assignMasterSystemRegions( Collection regions, List servers) { - if (servers == null || regions == null || regions.isEmpty()) { - return null; - } Map> assignments = new TreeMap<>(); if (this.maintenanceMode || this.onlySystemTablesOnMaster) { if (masterServerName != null && servers.contains(masterServerName)) { @@ -1267,15 +1266,16 @@ protected final boolean idleRegionServerExist(Cluster c){ * * @param regions all regions * @param servers all servers - * @return map of server to the regions it should take, or null if no - * assignment is possible (ie. no regions or no servers) + * @return map of server to the regions it should take, or emptyMap if no + * assignment is possible (ie. 
no servers) */ @Override + @NonNull public Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { metricsBalancer.incrMiscInvocations(); Map> assignments = assignMasterSystemRegions(regions, servers); - if (assignments != null && !assignments.isEmpty()) { + if (!assignments.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -1285,14 +1285,17 @@ public Map> roundRobinAssignment(List r regions.removeAll(masterRegions); } } - if (this.maintenanceMode || regions == null || regions.isEmpty()) { + /** + * only need assign system table + */ + if (this.maintenanceMode || regions.isEmpty()) { return assignments; } int numServers = servers == null ? 0 : servers.size(); if (numServers == 0) { LOG.warn("Wanted to do round robin assignment but no servers to assign to"); - return null; + return Collections.emptyMap(); } // TODO: instead of retainAssignment() and roundRobinAssignment(), we should just run the @@ -1407,15 +1410,17 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve * * @param regions regions and existing assignment from meta * @param servers available servers - * @return map of servers and regions to be assigned to them + * @return map of servers and regions to be assigned to them, or emptyMap if no + * assignment is possible (ie. no servers) */ @Override + @NonNull public Map> retainAssignment(Map regions, List servers) throws HBaseIOException { // Update metrics metricsBalancer.incrMiscInvocations(); Map> assignments = assignMasterSystemRegions(regions.keySet(), servers); - if (assignments != null && !assignments.isEmpty()) { + if (!assignments.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -1430,7 +1435,7 @@ public Map> retainAssignment(Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { @@ -116,7 +119,7 @@ public Map> roundRobinAssignment(List r Set regionSet = Sets.newHashSet(regions); Map> assignmentMap = assignMasterSystemRegions(regions, servers); - if (assignmentMap != null && !assignmentMap.isEmpty()) { + if (!assignmentMap.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -367,14 +370,15 @@ private void updateFavoredNodesForRegion(RegionInfo regionInfo, List * Reuse BaseLoadBalancer's retainAssignment, but generate favored nodes when its missing. 
*/ @Override + @NonNull public Map> retainAssignment(Map regions, List servers) throws HBaseIOException { Map> assignmentMap = Maps.newHashMap(); Map> result = super.retainAssignment(regions, servers); - if (result == null || result.isEmpty()) { + if (result.isEmpty()) { LOG.warn("Nothing to assign to, probably no servers or no regions"); - return null; + return result; } // Guarantee not to put other regions on master diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 50ddb416e911..db61c01dec9b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.rsgroup; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -174,25 +175,25 @@ public List balanceCluster( } @Override - public Map> roundRobinAssignment( - List regions, List servers) throws IOException { + @NonNull + public Map> roundRobinAssignment(List regions, + List servers) throws IOException { Map> assignments = Maps.newHashMap(); List, List>> pairs = generateGroupAssignments(regions, servers); for (Pair, List> pair : pairs) { - Map> result = this.internalBalancer - .roundRobinAssignment(pair.getFirst(), pair.getSecond()); - if (result != null) { - result.forEach((server, regionInfos) -> - assignments.computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); - } + Map> result = + this.internalBalancer.roundRobinAssignment(pair.getFirst(), pair.getSecond()); + result.forEach((server, regionInfos) -> assignments + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); } return assignments; } @Override + @NonNull public Map> retainAssignment(Map regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { try { Map> assignments = new TreeMap<>(); List, List>> pairs = @@ -203,8 +204,8 @@ public Map> retainAssignment(Map currentAssignmentMap.put(r, regions.get(r))); Map> pairResult = this.internalBalancer.retainAssignment(currentAssignmentMap, pair.getSecond()); - pairResult.forEach((server, rs) -> - assignments.computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); + pairResult.forEach((server, rs) -> assignments + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); } return assignments; } catch (IOException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 4bbb3ed9c178..c0eacae0a18d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -21,8 +21,10 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import edu.umd.cs.findbugs.annotations.NonNull; import java.util.List; import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -280,6 +282,7 @@ static class MockLoadBalancer extends SimpleLoadBalancer { static boolean retainAssignCalled = false; @Override + @NonNull public Map> retainAssignment( Map regions, List servers) throws HBaseIOException { 
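    // Contract sketch implied by the @NonNull annotations added above: balancers now
    // return an empty map rather than null when no assignment is possible, so callers
    // may iterate unconditionally, e.g.
    //   Map<ServerName, List<RegionInfo>> plan = balancer.retainAssignment(regions, servers);
    //   plan.forEach((sn, ris) -> LOG.debug("{} keeps {} regions", sn, ris.size()));
    // (LOG is illustrative; removing the caller-side null checks is the point of HBASE-25093.)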
retainAssignCalled = true; From c28220522d71edfde3424d0413cd19a5bdff6e60 Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 9 Oct 2020 21:00:30 -0700 Subject: [PATCH 128/769] HBASE-25168 Unify WAL name timestamp parsers Signed-off-by: Duo Zhang Signed-off-by: Peter Somogyi --- .../hbase/mapreduce/WALInputFormat.java | 2 +- .../hbase/wal/AbstractFSWALProvider.java | 54 +++++++++++++------ .../java/org/apache/hadoop/hbase/wal/WAL.java | 29 ---------- 3 files changed, 39 insertions(+), 46 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index b410fc22d891..14bfec72efe8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -336,7 +336,7 @@ private List getFiles(FileSystem fs, Path dir, long startTime, long static void addFile(List result, LocatedFileStatus lfs, long startTime, long endTime) { - long timestamp = WAL.getTimestamp(lfs.getPath().getName()); + long timestamp = AbstractFSWALProvider.getTimestamp(lfs.getPath().getName()); if (timestamp > 0) { // Looks like a valid timestamp. if (timestamp <= endTime && timestamp >= startTime) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 84c94e608168..109e1107669e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -258,32 +258,37 @@ static void requestLogRoll(final WAL wal) { public static final String SPLITTING_EXT = "-splitting"; /** - * It returns the file create timestamp from the file name. For name format see + * Pattern used to validate a WAL file name see {@link #validateWALFilename(String)} for + * description. + */ + private static final Pattern WAL_FILE_NAME_PATTERN = + Pattern.compile("(.+)\\.(\\d+)(\\.[0-9A-Za-z]+)?"); + + /** + * Define for when no timestamp found. + */ + private static final long NO_TIMESTAMP = -1L; + + /** + * It returns the file create timestamp (the 'FileNum') from the file name. For name format see * {@link #validateWALFilename(String)} public until remaining tests move to o.a.h.h.wal * @param wal must not be null * @return the file number that is part of the WAL file name */ @VisibleForTesting public static long extractFileNumFromWAL(final WAL wal) { - final Path walName = ((AbstractFSWAL) wal).getCurrentFileName(); - if (walName == null) { + final Path walPath = ((AbstractFSWAL) wal).getCurrentFileName(); + if (walPath == null) { throw new IllegalArgumentException("The WAL path couldn't be null"); } - Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(walName.getName()); - if (matcher.matches()) { - return Long.parseLong(matcher.group(2)); - } else { - throw new IllegalArgumentException(walName.getName() + " is not a valid wal file name"); + String name = walPath.getName(); + long timestamp = getTimestamp(name); + if (timestamp == NO_TIMESTAMP) { + throw new IllegalArgumentException(name + " is not a valid wal file name"); } + return timestamp; } - /** - * Pattern used to validate a WAL file name see {@link #validateWALFilename(String)} for - * description. 
- */ - private static final Pattern WAL_FILE_NAME_PATTERN = - Pattern.compile("(.+)\\.(\\d+)(\\.[0-9A-Za-z]+)?"); - /** * A WAL file name is of the format: <wal-name>{@link #WAL_FILE_NAME_DELIMITER} * <file-creation-timestamp>[.<suffix>]. provider-name is usually made up of a @@ -295,6 +300,23 @@ public static boolean validateWALFilename(String filename) { return WAL_FILE_NAME_PATTERN.matcher(filename).matches(); } + /** + * Split a WAL filename to get a start time. WALs usually have the time we start writing to them + * with as part of their name, usually the suffix. Sometimes there will be an extra suffix as when + * it is a WAL for the meta table. For example, WALs might look like this + * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the + * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a + * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have + * no timestamp on it. For example the recovered.edits files are WALs but are named in ascending + * order. Here is an example: 0000000000000016310. Allow for this. + * @param name Name of the WAL file. + * @return Timestamp or {@link #NO_TIMESTAMP}. + */ + public static long getTimestamp(String name) { + Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(name); + return matcher.matches() ? Long.parseLong(matcher.group(2)): NO_TIMESTAMP; + } + /** * Construct the directory name for all WALs on a given server. Dir names currently look like this * for WALs: hbase//WALs/kalashnikov.att.net,61634,1486865297088. @@ -444,7 +466,7 @@ public int compare(Path o1, Path o2) { * @return start time */ private static long getTS(Path p) { - return WAL.getTimestamp(p.getName()); + return getTimestamp(p.getName()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 20379fd7fe9e..747b2770d457 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -32,7 +32,6 @@ import org.apache.yetus.audience.InterfaceStability; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import static org.apache.commons.lang3.StringUtils.isNumeric; /** * A Write Ahead Log (WAL) provides service for reading, writing waledits. This interface provides @@ -300,32 +299,4 @@ public String toString() { return this.key + "=" + this.edit; } } - - /** - * Split a WAL filename to get a start time. WALs usually have the time we start writing to them - * as part of their name, usually the suffix. Sometimes there will be an extra suffix as when it - * is a WAL for the meta table. For example, WALs might look like this - * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the - * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a - * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have - * no timestamp on it. For example the recovered.edits files are WALs but are named in ascending - * order. Here is an example: 0000000000000016310. Allow for this. - * @param name Name of the WAL file. - * @return Timestamp or -1. - */ - public static long getTimestamp(String name) { - String [] splits = name.split("\\."); - if (splits.length <= 1) { - return -1; - } - String timestamp = splits[splits.length - 1]; - if (!isNumeric(timestamp)) { - // Its a '.meta' or a '.syncrep' suffix. 
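    // Worked example of the unified pattern above (values taken from the javadoc):
    //   Matcher m = WAL_FILE_NAME_PATTERN.matcher("10.20.20.171%3A60020.1277499063250");
    //   m.matches();   // true; group(2) == "1277499063250", the creation timestamp
    //   WAL_FILE_NAME_PATTERN.matcher("0000000000000016310").matches();
    //                  // false: recovered.edits names carry no dot-separated timestamp,
    //                  //        so getTimestamp returns NO_TIMESTAMP (-1)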
- timestamp = splits[splits.length - 2]; - if (!isNumeric(timestamp)) { - return -1; - } - } - return Long.parseLong(timestamp); - } } From 92c3bcd9fbc108dd7699d75486e85707e3e4514f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 13 Oct 2020 09:43:56 +0800 Subject: [PATCH 129/769] HBASE-25164 Make ModifyTableProcedure support changing meta replica count (#2513) Signed-off-by: Michael Stack --- .../org/apache/hadoop/hbase/HConstants.java | 13 ++ .../hbase/IntegrationTestMetaReplicas.java | 2 +- .../apache/hadoop/hbase/master/HMaster.java | 60 ++++--- .../hbase/master/MasterMetaBootstrap.java | 111 ------------- .../master/assignment/AssignmentManager.java | 19 ++- .../master/assignment/RegionStateStore.java | 157 +++++++++--------- .../master/procedure/InitMetaProcedure.java | 6 +- .../hadoop/hbase/util/FSTableDescriptors.java | 9 +- .../resources/hbase-webapps/master/table.jsp | 4 +- .../client/AbstractTestRegionLocator.java | 4 +- .../client/MetaWithReplicasTestBase.java | 5 +- .../hbase/client/RegionReplicaTestHelper.java | 8 +- .../TestAsyncAdminWithRegionReplicas.java | 8 +- .../client/TestAsyncMetaRegionLocator.java | 3 +- .../client/TestAsyncTableUseMetaReplicas.java | 3 +- .../hbase/client/TestCleanupMetaReplica.java | 55 ++++++ .../TestCleanupMetaReplicaThroughConfig.java | 69 ++++++++ .../TestFailedMetaReplicaAssigment.java | 47 +++++- .../TestIncreaseMetaReplicaThroughConfig.java | 68 ++++++++ .../hbase/client/TestMasterRegistry.java | 5 +- .../client/TestMetaRegionLocationCache.java | 4 +- .../client/TestMetaWithReplicasBasic.java | 25 +-- .../hbase/client/TestReplicaWithCluster.java | 6 +- .../client/TestZKConnectionRegistry.java | 10 +- .../hbase/master/AlwaysStandByHMaster.java | 2 +- .../TestReadAndWriteRegionInfoFile.java | 2 +- 26 files changed, 410 insertions(+), 295 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 5b4b6fb4bd63..41bf487de055 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1131,7 +1131,20 @@ public enum OperationStatusCode { /** Conf key for enabling meta replication */ public static final String USE_META_REPLICAS = "hbase.meta.replicas.use"; public static final boolean DEFAULT_USE_META_REPLICAS = false; + + /** + * @deprecated Since 2.4.0, will be removed in 4.0.0. Please change the meta replicas number by + * altering meta table, i.e, set a new 'region replication' number and call + * modifyTable. + */ + @Deprecated public static final String META_REPLICAS_NUM = "hbase.meta.replica.count"; + /** + * @deprecated Since 2.4.0, will be removed in 4.0.0. Please change the meta replicas number by + * altering meta table, i.e, set a new 'region replication' number and call + * modifyTable. 
+ */ + @Deprecated public static final int DEFAULT_META_REPLICA_NUM = 1; /** diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java index d906bfd8420c..05e203607f53 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java @@ -53,11 +53,11 @@ public static void setUp() throws Exception { if (util == null) { util = new IntegrationTestingUtility(); } - util.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); util.getConfiguration().setInt( StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); // Make sure there are three servers. util.initializeCluster(3); + HBaseTestingUtility.setReplicas(util.getAdmin(), TableName.META_TABLE_NAME, 3); ZKWatcher zkw = util.getZooKeeperWatcher(); Configuration conf = util.getConfiguration(); String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 9c617bbe7f8e..85ac5e0b0490 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1015,10 +1015,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc status.setStatus("Initializing meta table if this is a new deploy"); InitMetaProcedure initMetaProc = null; // Print out state of hbase:meta on startup; helps debugging. - RegionState rs = this.assignmentManager.getRegionStates(). - getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO); - LOG.info("hbase:meta {}", rs); - if (rs != null && rs.isOffline()) { + if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) { Optional optProc = procedureExecutor.getProcedures().stream() .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny(); initMetaProc = optProc.orElseGet(() -> { @@ -1070,6 +1067,39 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc this.assignmentManager.joinCluster(); // The below depends on hbase:meta being online. this.tableStateManager.start(); + + // for migrating from a version without HBASE-25099, and also for honoring the configuration + // first. + if (conf.get(HConstants.META_REPLICAS_NUM) != null) { + int replicasNumInConf = + conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); + TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME); + if (metaDesc.getRegionReplication() != replicasNumInConf) { + // it is possible that we already have some replicas before upgrading, so we must set the + // region replication number in meta TableDescriptor directly first, without creating a + // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas. 
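    // Operator-side sketch of the replacement workflow the deprecation points at
    // (client code is illustrative; the Admin calls are standard API):
    //   TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
    //   admin.modifyTable(TableDescriptorBuilder.newBuilder(meta)
    //       .setRegionReplication(3).build());   // instead of hbase.meta.replica.count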
+ int existingReplicasCount = + assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); + if (existingReplicasCount > metaDesc.getRegionReplication()) { + LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) + .setRegionReplication(existingReplicasCount).build(); + tableDescriptors.update(metaDesc); + } + // check again, and issue a ModifyTableProcedure if needed + if (metaDesc.getRegionReplication() != replicasNumInConf) { + LOG.info( + "The {} config is {} while the replica count in TableDescriptor is {}" + + " for hbase:meta, altering...", + HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); + procedureExecutor.submitProcedure(new ModifyTableProcedure( + procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) + .setRegionReplication(replicasNumInConf).build(), + null, metaDesc, false)); + } + } + } // Below has to happen after tablestatemanager has started in the case where this hbase-2.x // is being started over an hbase-1.x dataset. tablestatemanager runs a migration as part // of its 'start' moving table state from zookeeper to hbase:meta. This migration needs to @@ -1133,13 +1163,6 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc } assignmentManager.checkIfShouldMoveSystemRegionAsync(); - status.setStatus("Assign meta replicas"); - MasterMetaBootstrap metaBootstrap = createMetaBootstrap(); - try { - metaBootstrap.assignMetaReplicas(); - } catch (IOException | KeeperException e){ - LOG.error("Assigning meta replica failed: ", e); - } status.setStatus("Starting quota manager"); initQuotaManager(); if (QuotaUtil.isQuotaEnabled(conf)) { @@ -1294,21 +1317,6 @@ private void initMobCleaner() { getChoreService().scheduleChore(mobFileCompactionChore); } - /** - *

-   * Create a {@link MasterMetaBootstrap} instance.
-   * </p>
-   * <p>
-   * Will be overridden in tests.
-   * </p>
-   */
-  @VisibleForTesting
-  protected MasterMetaBootstrap createMetaBootstrap() {
-    // We put this out here in a method so can do a Mockito.spy and stub it out
-    // w/ a mocked up MasterMetaBootstrap.
-    return new MasterMetaBootstrap(this);
-  }
-
   /**
    * Create a {@link ServerManager} instance. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java deleted file mode 100644 index 0b3476fc9dd5..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import java.io.IOException; -import java.util.List; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Used by the HMaster on startup to split meta logs and assign the meta table. - */ -@InterfaceAudience.Private -class MasterMetaBootstrap { - private static final Logger LOG = LoggerFactory.getLogger(MasterMetaBootstrap.class); - - private final HMaster master; - - public MasterMetaBootstrap(HMaster master) { - this.master = master; - } - - /** - * For assigning hbase:meta replicas only. - * TODO: The way this assign runs, nothing but chance to stop all replicas showing up on same - * server as the hbase:meta region. - */ - void assignMetaReplicas() - throws IOException, InterruptedException, KeeperException { - int numReplicas = master.getConfiguration().getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); - final AssignmentManager assignmentManager = master.getAssignmentManager(); - if (!assignmentManager.isMetaLoaded()) { - throw new IllegalStateException("hbase:meta must be initialized first before we can " + - "assign out its replicas"); - } - ServerName metaServername = MetaTableLocator.getMetaRegionLocation(this.master.getZooKeeper()); - for (int i = 1; i < numReplicas; i++) { - // Get current meta state for replica from zk. 
- RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper(), i); - RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, i); - LOG.debug(hri.getRegionNameAsString() + " replica region state from zookeeper=" + metaState); - if (metaServername.equals(metaState.getServerName())) { - metaState = null; - LOG.info(hri.getRegionNameAsString() + - " old location is same as current hbase:meta location; setting location as null..."); - } - // These assigns run inline. All is blocked till they complete. Only interrupt is shutting - // down hosting server which calls AM#stop. - if (metaState != null && metaState.getServerName() != null) { - // Try to retain old assignment. - assignmentManager.assignAsync(hri, metaState.getServerName()); - } else { - assignmentManager.assignAsync(hri); - } - } - unassignExcessMetaReplica(numReplicas); - } - - private void unassignExcessMetaReplica(int numMetaReplicasConfigured) { - final ZKWatcher zooKeeper = master.getZooKeeper(); - // unassign the unneeded replicas (for e.g., if the previous master was configured - // with a replication of 3 and now it is 2, we need to unassign the 1 unneeded replica) - try { - List metaReplicaZnodes = zooKeeper.getMetaReplicaNodes(); - for (String metaReplicaZnode : metaReplicaZnodes) { - int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaZnode); - if (replicaId >= numMetaReplicasConfigured) { - RegionState r = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId); - LOG.info("Closing excess replica of meta region " + r.getRegion()); - // send a close and wait for a max of 30 seconds - ServerManager.closeRegionSilentlyAndWait(master.getAsyncClusterConnection(), - r.getServerName(), r.getRegion(), 30000); - ZKUtil.deleteNode(zooKeeper, zooKeeper.getZNodePaths().getZNodeForReplica(replicaId)); - } - } - } catch (Exception ex) { - // ignore the exception since we don't want the master to be wedged due to potential - // issues in the cleanup of the extra regions. We can do that cleanup via hbck or manually - LOG.warn("Ignoring exception " + ex); - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index f23d17026f3c..9a88533f3565 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -228,13 +229,18 @@ public void start() throws IOException, KeeperException { // load meta region state ZKWatcher zkw = master.getZooKeeper(); // it could be null in some tests - if (zkw != null) { + if (zkw == null) { + return; + } + List metaZNodes = zkw.getMetaReplicaNodes(); + LOG.debug("hbase:meta replica znodes: {}", metaZNodes); + for (String metaZNode : metaZNodes) { + int replicaId = zkw.getZNodePaths().getMetaReplicaIdFromZNode(metaZNode); // here we are still in the early steps of active master startup. 
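    // For illustration (standard znode layout, assuming the default /hbase base znode):
    //   /hbase/meta-region-server     <- replica 0, the default meta replica
    //   /hbase/meta-region-server-1   <- replica 1
    //   /hbase/meta-region-server-2   <- replica 2
    // getMetaReplicaIdFromZNode recovers the integer replica id from each node name.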
There is only one thread(us) // can access AssignmentManager and create region node, so here we do not need to lock the // region node. - RegionState regionState = MetaTableLocator.getMetaRegionState(zkw); - RegionStateNode regionNode = - regionStates.getOrCreateRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO); + RegionState regionState = MetaTableLocator.getMetaRegionState(zkw, replicaId); + RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionState.getRegion()); regionNode.setRegionLocation(regionState.getServerName()); regionNode.setState(regionState.getState()); if (regionNode.getProcedure() != null) { @@ -243,7 +249,10 @@ public void start() throws IOException, KeeperException { if (regionState.getServerName() != null) { regionStates.addRegionToServer(regionNode); } - setMetaAssigned(regionState.getRegion(), regionState.getState() == State.OPEN); + if (RegionReplicaUtil.isDefaultReplica(replicaId)) { + setMetaAssigned(regionState.getRegion(), regionState.getState() == State.OPEN); + } + LOG.debug("Loaded hbase:meta {}", regionNode); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 78f2bb75fe8c..c8b0e351f4e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -64,6 +64,8 @@ import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; @@ -126,24 +128,23 @@ public boolean visit(final Result r) throws IOException { } /** - * Queries META table for the passed region encoded name, - * delegating action upon results to the RegionStateVisitor - * passed as second parameter. + * Queries META table for the passed region encoded name, delegating action upon results to the + * RegionStateVisitor passed as second parameter. * @param regionEncodedName encoded name for the Region we want to query META for. * @param visitor The RegionStateVisitor instance to react over the query results. * @throws IOException If some error occurs while querying META or parsing results. */ public void visitMetaForRegion(final String regionEncodedName, final RegionStateVisitor visitor) - throws IOException { - Result result = MetaTableAccessor. - scanByRegionEncodedName(master.getConnection(), regionEncodedName); + throws IOException { + Result result = + MetaTableAccessor.scanByRegionEncodedName(master.getConnection(), regionEncodedName); if (result != null) { visitMetaEntry(visitor, result); } } private void visitMetaEntry(final RegionStateVisitor visitor, final Result result) - throws IOException { + throws IOException { final RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result); if (rl == null) return; @@ -178,18 +179,18 @@ void updateRegionLocation(RegionStateNode regionStateNode) throws IOException { updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(), regionStateNode.getState()); } else { - long openSeqNum = regionStateNode.getState() == State.OPEN ? 
regionStateNode.getOpenSeqNum() - : HConstants.NO_SEQNUM; + long openSeqNum = regionStateNode.getState() == State.OPEN ? regionStateNode.getOpenSeqNum() : + HConstants.NO_SEQNUM; updateUserRegionLocation(regionStateNode.getRegionInfo(), regionStateNode.getState(), regionStateNode.getRegionLocation(), openSeqNum, // The regionStateNode may have no procedure in a test scenario; allow for this. - regionStateNode.getProcedure() != null ? regionStateNode.getProcedure().getProcId() - : Procedure.NO_PROC_ID); + regionStateNode.getProcedure() != null ? regionStateNode.getProcedure().getProcId() : + Procedure.NO_PROC_ID); } } private void updateMetaLocation(RegionInfo regionInfo, ServerName serverName, State state) - throws IOException { + throws IOException { try { MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName, regionInfo.getReplicaId(), state); @@ -199,8 +200,7 @@ private void updateMetaLocation(RegionInfo regionInfo, ServerName serverName, St } private void updateUserRegionLocation(RegionInfo regionInfo, State state, - ServerName regionLocation, long openSeqNum, - long pid) throws IOException { + ServerName regionLocation, long openSeqNum, long pid) throws IOException { long time = EnvironmentEdgeManager.currentTime(); final int replicaId = regionInfo.getReplicaId(); final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time); @@ -210,7 +210,7 @@ private void updateUserRegionLocation(RegionInfo regionInfo, State state, .append(regionInfo.getEncodedName()).append(", regionState=").append(state); if (openSeqNum >= 0) { Preconditions.checkArgument(state == State.OPEN && regionLocation != null, - "Open region should be on a server"); + "Open region should be on a server"); MetaTableAccessor.addLocation(put, regionLocation, openSeqNum, replicaId); // only update replication barrier for default replica if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID && @@ -223,30 +223,23 @@ private void updateUserRegionLocation(RegionInfo regionInfo, State state, } else if (regionLocation != null) { // Ideally, if no regionLocation, write null to the hbase:meta but this will confuse clients // currently; they want a server to hit. TODO: Make clients wait if no location. 
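      // For reference (long-standing hbase:meta layout, not introduced by this patch),
      // the per-replica qualifiers written by this method look like, for replicaId = 1:
      //   info:server_0001, info:sn_0001, info:state_0001
      // while the unsuffixed info:server, info:sn, info:state serve replica 0.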
- put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(CatalogFamilyFormat.getServerNameColumn(replicaId)) - .setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(regionLocation.getServerName())) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(CatalogFamilyFormat.getServerNameColumn(replicaId)) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(regionLocation.getServerName())).build()); info.append(", regionLocation=").append(regionLocation); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(getStateColumn(replicaId)) - .setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(state.name())) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getStateColumn(replicaId)) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name())) + .build()); LOG.info(info.toString()); updateRegionLocation(regionInfo, state, put); } private void updateRegionLocation(RegionInfo regionInfo, State state, Put put) - throws IOException { + throws IOException { try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) { table.put(put); } catch (IOException e) { @@ -319,7 +312,7 @@ private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws } // ============================================================================================ - // Update Region Splitting State helpers + // Update Region Splitting State helpers // ============================================================================================ /** * Splits the region into two in an atomic operation. Offlines the parent region with the @@ -370,7 +363,7 @@ public void splitRegion(RegionInfo parent, RegionInfo splitA, RegionInfo splitB, } // ============================================================================================ - // Update Region Merging State helpers + // Update Region Merging State helpers // ============================================================================================ public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serverName, TableDescriptor htd) throws IOException { @@ -378,7 +371,7 @@ public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serv long time = HConstants.LATEST_TIMESTAMP; List mutations = new ArrayList<>(); List replicationParents = new ArrayList<>(); - for (RegionInfo ri: parents) { + for (RegionInfo ri : parents) { long seqNum = globalScope ? getOpenSeqNumForParentRegion(ri) : -1; // Deletes for merging regions mutations.add(MetaTableAccessor.makeDeleteFromRegionInfo(ri, time)); @@ -438,8 +431,7 @@ public List getMergeRegions(RegionInfo region) throws IOException { * @param connection connection we're using * @param mergeRegion the merged region */ - public void deleteMergeQualifiers(RegionInfo mergeRegion) - throws IOException { + public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException { // NOTE: We are doing a new hbase:meta read here. 
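    // Context sketch (existing meta convention, assumed rather than defined in this diff):
    // merge parents are stored on the merged region's row under qualifiers with the
    // "merge" prefix (e.g. info:merge0000, info:merge0001); this method deletes them once
    // the parent references are no longer needed.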
Cell[] cells = getRegionCatalogResult(mergeRegion).rawCells(); if (cells == null || cells.length == 0) { @@ -493,7 +485,7 @@ static Put addMergeRegions(Put put, Collection mergeRegions) throws } // ============================================================================================ - // Delete Region State helpers + // Delete Region State helpers // ============================================================================================ /** * Deletes the specified region. @@ -554,39 +546,52 @@ private Scan getScanForUpdateRegionReplicas(TableName tableName) { public void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) throws IOException { - Scan scan = getScanForUpdateRegionReplicas(tableName); - List deletes = new ArrayList<>(); - long now = EnvironmentEdgeManager.currentTime(); - try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { - for (;;) { - Result result = scanner.next(); - if (result == null) { - break; - } - RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); - if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { - continue; - } - Delete delete = new Delete(result.getRow()); + if (TableName.isMetaTableName(tableName)) { + ZKWatcher zk = master.getZooKeeper(); + try { for (int i = newReplicaCount; i < oldReplicaCount; i++) { - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), now); - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), now); - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), - now); - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), - now); - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getRegionStateColumn(i), - now); + ZKUtil.deleteNode(zk, zk.getZNodePaths().getZNodeForReplica(i)); } - deletes.add(delete); + } catch (KeeperException e) { + throw new IOException(e); + } + } else { + Scan scan = getScanForUpdateRegionReplicas(tableName); + List deletes = new ArrayList<>(); + long now = EnvironmentEdgeManager.currentTime(); + try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; + } + RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); + if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { + continue; + } + Delete delete = new Delete(result.getRow()); + for (int i = newReplicaCount; i < oldReplicaCount; i++) { + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getRegionStateColumn(i), now); + } + deletes.add(delete); + } + debugLogMutations(deletes); + metaTable.delete(deletes); } - debugLogMutations(deletes); - metaTable.delete(deletes); } } // ========================================================================== - // Table Descriptors helpers + // Table Descriptors helpers // ========================================================================== private boolean hasGlobalReplicationScope(TableName tableName) throws 
IOException { return hasGlobalReplicationScope(getDescriptor(tableName)); @@ -605,7 +610,7 @@ private TableDescriptor getDescriptor(TableName tableName) throws IOException { } // ========================================================================== - // Region State + // Region State // ========================================================================== /** @@ -613,29 +618,29 @@ private TableDescriptor getDescriptor(TableName tableName) throws IOException { * @return the region state, or null if unknown. */ public static State getRegionState(final Result r, RegionInfo regionInfo) { - Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, - getStateColumn(regionInfo.getReplicaId())); + Cell cell = + r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(regionInfo.getReplicaId())); if (cell == null || cell.getValueLength() == 0) { return null; } - String state = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); + String state = + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); try { return State.valueOf(state); } catch (IllegalArgumentException e) { - LOG.warn("BAD value {} in hbase:meta info:state column for region {} , " + - "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE", - state, regionInfo.getEncodedName()); + LOG.warn( + "BAD value {} in hbase:meta info:state column for region {} , " + + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE", + state, regionInfo.getEncodedName()); return null; } } private static byte[] getStateColumn(int replicaId) { - return replicaId == 0 - ? HConstants.STATE_QUALIFIER - : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.STATE_QUALIFIER : + Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } private static void debugLogMutations(List mutations) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java index f158452296c8..e92fc110aba2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java @@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -83,9 +82,8 @@ private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) t // created here in bootstrap and it'll need to be cleaned up. Better to // not make it in first place. Turn off block caching for bootstrap. // Enable after. 
- TableDescriptor metaDescriptor = FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, - rootDir, builder -> builder.setRegionReplication( - conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM))); + TableDescriptor metaDescriptor = + FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir); HRegion .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null) .close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index afff1c139311..937069f92be0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.lang3.NotImplementedException; @@ -121,20 +120,16 @@ public FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean @VisibleForTesting public static void tryUpdateMetaTableDescriptor(Configuration conf) throws IOException { tryUpdateAndGetMetaTableDescriptor(conf, CommonFSUtils.getCurrentFileSystem(conf), - CommonFSUtils.getRootDir(conf), null); + CommonFSUtils.getRootDir(conf)); } public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration conf, - FileSystem fs, Path rootdir, - Function metaObserver) throws IOException { + FileSystem fs, Path rootdir) throws IOException { // see if we already have meta descriptor on fs. Write one if not. try { return getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME); } catch (TableInfoMissingException e) { TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf); - if (metaObserver != null) { - builder = metaObserver.apply(builder); - } TableDescriptor td = builder.build(); LOG.info("Creating new hbase:meta table descriptor {}", td); TableName tableName = td.getTableName(); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 25b5979ae8c8..b800e72f37b7 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -155,8 +155,8 @@ Table table = master.getConnection().getTable(TableName.valueOf(fqtn)); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean readOnly = !InfoServer.canUserModifyUI(request, getServletContext(), conf); - int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); + int numMetaReplicas = + master.getTableDescriptors().get(TableName.META_TABLE_NAME).getRegionReplication(); Map frags = null; if (showFragmentation) { frags = FSUtils.getTableFragmentation(master); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java index 89f287bed1b3..09a081317517 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java @@ -47,8 +47,8 @@ public abstract class 
AbstractTestRegionLocator { protected static byte[][] SPLIT_KEYS; protected static void startClusterAndCreateTable() throws Exception { - UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, REGION_REPLICATION); UTIL.startMiniCluster(3); + HBaseTestingUtility.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, REGION_REPLICATION); TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(REGION_REPLICATION) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); @@ -59,7 +59,7 @@ protected static void startClusterAndCreateTable() throws Exception { UTIL.getAdmin().createTable(td, SPLIT_KEYS); UTIL.waitTableAvailable(TABLE_NAME); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration())) { + ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } UTIL.getAdmin().balancerSwitch(false, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java index 78e3e541e895..8cfad19f66dc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java @@ -26,9 +26,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartMiniClusterOption; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; @@ -56,12 +56,13 @@ public class MetaWithReplicasTestBase { protected static void startCluster() throws Exception { TEST_UTIL.getConfiguration().setInt("zookeeper.session.timeout", 30000); - TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TEST_UTIL.getConfiguration() .setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); StartMiniClusterOption option = StartMiniClusterOption.builder().numAlwaysStandByMasters(1) .numMasters(1).numRegionServers(REGIONSERVERS_COUNT).build(); TEST_UTIL.startMiniCluster(option); + Admin admin = TEST_UTIL.getAdmin(); + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 3); AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); Set sns = new HashSet(); ServerName hbaseMetaServerName = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java index 8e562bd984c7..a2466a5cd7fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java @@ -20,13 +20,13 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; -import 
org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLocations; @@ -44,10 +44,10 @@ private RegionReplicaTestHelper() { // waits for all replicas to have region location static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtility util, - ConnectionRegistry registry) { + ConnectionRegistry registry) throws IOException { Configuration conf = util.getConfiguration(); - int regionReplicaCount = util.getConfiguration().getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); + int regionReplicaCount = + util.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); Waiter.waitFor(conf, conf.getLong("hbase.client.sync.wait.timeout.msec", 60000), 200, true, new ExplainingPredicate() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index 50111f7eddd5..3596f1c0025d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -28,6 +28,7 @@ import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; @@ -51,12 +52,11 @@ public class TestAsyncAdminWithRegionReplicas extends TestAsyncAdminBase { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TestAsyncAdminBase.setUpBeforeClass(); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration())) { - RegionReplicaTestHelper - .waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration())) { + RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index 003bef33a994..733787773aa0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -22,7 +22,6 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; @@ -50,8 +49,8 @@ public class TestAsyncMetaRegionLocator { @BeforeClass public static void setUp() throws Exception { - TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TEST_UTIL.startMiniCluster(3); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); TEST_UTIL.waitUntilNoRegionsInTransition(); REGISTRY = 
ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java index 3485955b66a5..47d6ddb42947 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertArrayEquals; + import java.io.IOException; import java.util.Optional; import java.util.concurrent.ExecutionException; @@ -86,11 +87,11 @@ public void preScannerOpen(ObserverContext c, Scan @BeforeClass public static void setUp() throws Exception { Configuration conf = UTIL.getConfiguration(); - conf.setInt(HConstants.META_REPLICAS_NUM, 3); conf.setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, FailPrimaryMetaScanCp.class.getName()); UTIL.startMiniCluster(3); + HBaseTestingUtility.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); try (ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf)) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java new file mode 100644 index 000000000000..15815e40d6b9 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MiscTests.class, MediumTests.class }) +public class TestCleanupMetaReplica extends MetaWithReplicasTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCleanupMetaReplica.class); + + @BeforeClass + public static void setUp() throws Exception { + startCluster(); + } + + @Test + public void testReplicaCleanup() throws Exception { + ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + List metaReplicaZnodes = zkw.getMetaReplicaNodes(); + assertEquals(3, metaReplicaZnodes.size()); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 1); + metaReplicaZnodes = zkw.getMetaReplicaNodes(); + assertEquals(1, metaReplicaZnodes.size()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java new file mode 100644 index 000000000000..66f2df6bbc8c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Make sure we will honor the {@link HConstants#META_REPLICAS_NUM}. 
+ */ +@Category({ MiscTests.class, MediumTests.class }) +public class TestCleanupMetaReplicaThroughConfig extends MetaWithReplicasTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCleanupMetaReplicaThroughConfig.class); + + @BeforeClass + public static void setUp() throws Exception { + startCluster(); + } + + @Test + public void testReplicaCleanup() throws Exception { + ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + List metaReplicaZnodes = zkw.getMetaReplicaNodes(); + assertEquals(3, metaReplicaZnodes.size()); + + final HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); + master.stop("Restarting"); + TEST_UTIL.waitFor(30000, () -> master.isStopped()); + TEST_UTIL.getMiniHBaseCluster().getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 1); + + JVMClusterUtil.MasterThread newMasterThread = TEST_UTIL.getMiniHBaseCluster().startMaster(); + final HMaster newMaster = newMasterThread.getMaster(); + + // wait until the new master has finished the meta replica assignment logic + TEST_UTIL.waitFor(30000, () -> newMaster.getMasterQuotaManager() != null); + TEST_UTIL.waitFor(30000, + () -> TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes().size() == 1); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java index 0c26d7934ffd..18235ebbcf96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java @@ -22,18 +22,23 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; -import java.util.concurrent.Future; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartMiniClusterOption; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.RegionStateNode; import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.junit.AfterClass; @@ -53,7 +58,8 @@ public class TestFailedMetaReplicaAssigment { @BeforeClass public static void setUp() throws Exception { - // using our rigged master, to force a failed meta replica assignment + // using our rigged master, to force a failed meta replica assignment when starting up the master + // this test can be removed once we remove the HConstants.META_REPLICAS_NUM config. 
Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt(HConstants.META_REPLICAS_NUM, 3); StartMiniClusterOption option = StartMiniClusterOption.builder().numAlwaysStandByMasters(1) @@ -100,9 +106,20 @@ public void testFailedReplicaAssignment() throws InterruptedException { } public static class BrokenTransitRegionStateProcedure extends TransitRegionStateProcedure { - protected BrokenTransitRegionStateProcedure() { + + public BrokenTransitRegionStateProcedure() { super(null, null, null, false, TransitionType.ASSIGN); } + + public BrokenTransitRegionStateProcedure(MasterProcedureEnv env, RegionInfo hri) { + super(env, hri, null, false, TransitionType.ASSIGN); + } + + @Override + protected Procedure[] execute(MasterProcedureEnv env) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throw new ProcedureSuspendedException("Never end procedure!"); + } } public static class BrokenMetaReplicaMaster extends HMaster { @@ -124,12 +141,24 @@ public BrokenMasterMetaAssignmentManager(final MasterServices master) { this.master = master; } - public Future assignAsync(RegionInfo regionInfo, ServerName sn) throws IOException { - RegionStateNode regionNode = getRegionStates().getOrCreateRegionStateNode(regionInfo); - if (regionNode.getRegionInfo().getReplicaId() == 2) { - regionNode.setProcedure(new BrokenTransitRegionStateProcedure()); + @Override + public TransitRegionStateProcedure[] createAssignProcedures(List hris) { + List procs = new ArrayList<>(); + for (RegionInfo hri : hris) { + if (hri.isMetaRegion() && hri.getReplicaId() == 2) { + RegionStateNode regionNode = getRegionStates().getOrCreateRegionStateNode(hri); + regionNode.lock(); + try { + procs.add(regionNode.setProcedure(new BrokenTransitRegionStateProcedure( + master.getMasterProcedureExecutor().getEnvironment(), hri))); + } finally { + regionNode.unlock(); + } + } else { + procs.add(super.createAssignProcedures(Collections.singletonList(hri))[0]); + } } - return super.assignAsync(regionInfo, sn); + return procs.toArray(new TransitRegionStateProcedure[0]); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java new file mode 100644 index 000000000000..77a2a0c4423e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Make sure we will honor the {@link HConstants#META_REPLICAS_NUM}. And also test upgrading. + */ +@Category({ MiscTests.class, MediumTests.class }) +public class TestIncreaseMetaReplicaThroughConfig extends MetaWithReplicasTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestIncreaseMetaReplicaThroughConfig.class); + + @BeforeClass + public static void setUp() throws Exception { + startCluster(); + } + + @Test + public void testUpgradeAndIncreaseReplicaCount() throws Exception { + HMaster oldMaster = TEST_UTIL.getMiniHBaseCluster().getMaster(); + TableDescriptors oldTds = oldMaster.getTableDescriptors(); + TableDescriptor oldMetaTd = oldTds.get(TableName.META_TABLE_NAME); + assertEquals(3, oldMetaTd.getRegionReplication()); + // force update the replica count to 1 and then kill the master, to simulate that when upgrading, + // we have no region replication in meta table descriptor but we actually have meta region + // replicas + oldTds.update(TableDescriptorBuilder.newBuilder(oldMetaTd).setRegionReplication(1).build()); + oldMaster.stop("Restarting"); + TEST_UTIL.waitFor(30000, () -> oldMaster.isStopped()); + + // increase replica count to 5 through Configuration + TEST_UTIL.getMiniHBaseCluster().getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 5); + TEST_UTIL.getMiniHBaseCluster().startMaster(); + TEST_UTIL.waitFor(30000, + () -> TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes().size() == 5); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java index ba875c5b95fc..359ad61c4b79 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -36,6 +35,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartMiniClusterOption; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -45,6 +45,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @Category({ MediumTests.class, ClientTests.class }) @@ -57,10 +58,10 @@ public class TestMasterRegistry { @BeforeClass public static void setUp() throws Exception { - TEST_UTIL.getConfiguration().setInt(META_REPLICAS_NUM, 3); 
StartMiniClusterOption.Builder builder = StartMiniClusterOption.builder(); builder.numMasters(3).numRegionServers(3); TEST_UTIL.startMiniCluster(builder.build()); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index abaf0920ce10..d42c1240f9ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; + import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MetaRegionLocationCache; import org.apache.hadoop.hbase.master.RegionState; @@ -57,8 +59,8 @@ public class TestMetaRegionLocationCache { @BeforeClass public static void setUp() throws Exception { - TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TEST_UTIL.startMiniCluster(3); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); REGISTRY = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); TEST_UTIL.getAdmin().balancerSwitch(false, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java index 91754eb35b73..8ffbe6bb47fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java @@ -20,17 +20,15 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -82,27 +80,6 @@ public void testZookeeperNodesForReplicas() throws Exception { } } - @Test - public void testReplicaCleanup() throws Exception { - ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); - List metaReplicaZnodes = zkw.getMetaReplicaNodes(); - assertEquals(3, metaReplicaZnodes.size()); - - final HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); - master.stop("Restarting"); - TEST_UTIL.waitFor(30000, () -> master.isStopped()); - 
TEST_UTIL.getMiniHBaseCluster().getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 1); - - JVMClusterUtil.MasterThread newMasterThread = TEST_UTIL.getMiniHBaseCluster().startMaster(); - final HMaster newMaster = newMasterThread.getMaster(); - - //wait until new master finished meta replica assignment logic - TEST_UTIL.waitFor(30000, () -> newMaster.getMasterQuotaManager() != null); - zkw = TEST_UTIL.getZooKeeperWatcher(); - metaReplicaZnodes = zkw.getMetaReplicaNodes(); - assertEquals(1, metaReplicaZnodes.size()); - - } @Test public void testAccessingUnknownTables() throws Exception { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index 3215ea8b64a7..491612c6be95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -249,9 +249,6 @@ public static void beforeClass() throws Exception { HTU.getConfiguration().setInt("hbase.client.primaryCallTimeout.get", 1000000); HTU.getConfiguration().setInt("hbase.client.primaryCallTimeout.scan", 1000000); - // Enable meta replica at server side - HTU.getConfiguration().setInt("hbase.meta.replica.count", 2); - // Make sure master does not host system tables. HTU.getConfiguration().set("hbase.balancer.tablesOnMaster", "none"); @@ -263,6 +260,9 @@ public static void beforeClass() throws Exception { META_SCAN_TIMEOUT_IN_MILLISEC * 1000); HTU.startMiniCluster(NB_SERVERS); + // Enable meta replica at server side + HBaseTestingUtility.setReplicas(HTU.getAdmin(), TableName.META_TABLE_NAME, 2); + HTU.getHBaseCluster().startMaster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 137cb28573a3..427222f8e40c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; @@ -25,6 +24,7 @@ import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.concurrent.ExecutionException; import java.util.stream.IntStream; @@ -62,13 +62,9 @@ public class TestZKConnectionRegistry { @BeforeClass public static void setUp() throws Exception { - TEST_UTIL.getConfiguration().setInt(META_REPLICAS_NUM, 3); TEST_UTIL.startMiniCluster(3); - Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - // make sure that we do not depend on this config when getting locations for meta replicas, see - // HBASE-21658. 
- conf.setInt(META_REPLICAS_NUM, 1); - REGISTRY = new ZKConnectionRegistry(conf); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java index 85eac4014f08..3d36db71242a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java @@ -64,10 +64,10 @@ boolean blockUntilBecomingActiveMaster(int checkInterval, MonitoredTask startupS if (MasterAddressTracker.getMasterAddress(watcher) != null) { clusterHasActiveMaster.set(true); } - Threads.sleepWithoutInterrupt(100); } catch (IOException e) { // pass, we will get notified when some other active master creates the znode. } + Threads.sleepWithoutInterrupt(1000); } catch (KeeperException e) { master.abort("Received an unexpected KeeperException, aborting", e); return false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java index 7d6c55b5c908..8754172ba03a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java @@ -71,7 +71,7 @@ public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedExce RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO; // Create a region. That'll write the .regioninfo file. FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR); - FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR, null); + FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR); HRegion r = HBaseTestingUtility.createRegionAndWAL(ri, ROOT_DIR, CONF, fsTableDescriptors.get(TableName.META_TABLE_NAME)); // Get modtime on the file. From b186c51796cc61039c6e5e18be6ff06dd6ea8754 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 13 Oct 2020 09:44:18 +0800 Subject: [PATCH 130/769] HBASE-25169 Update documentation about meta region replica (#2528) Signed-off-by: Michael Stack --- src/main/asciidoc/_chapters/architecture.adoc | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc index bc4af05b4afc..7cf151d2892c 100644 --- a/src/main/asciidoc/_chapters/architecture.adoc +++ b/src/main/asciidoc/_chapters/architecture.adoc @@ -2753,8 +2753,6 @@ See the above HDFS Architecture link for more information. [[arch.timelineconsistent.reads]] == Timeline-consistent High Available Reads -NOTE: The current <> does not work well with region replica, so this feature maybe broken. Use it with caution. - [[casestudies.timelineconsistent.intro]] === Introduction @@ -2920,7 +2918,7 @@ Instead you can change the number of region replicas per table to increase or de hbase.regionserver.meta.storefile.refresh.period 300000 - The period (in milliseconds) for refreshing the store files for the hbase:meta tables secondary regions. 0 means this feature is disabled. 
Secondary regions sees new files (from flushes and compactions) from primary once the secondary region refreshes the list of files in the region (there is no notification mechanism). But too frequent refreshes might cause extra Namenode pressure. If the files cannot be refreshed for longer than HFile TTL (hbase.master.hfilecleaner.ttl) the requests are rejected. Configuring HFile TTL to a larger value is also recommended with this setting. This should be a non-zero number if meta replicas are enabled (via hbase.meta.replica.count set to greater than 1). + The period (in milliseconds) for refreshing the store files for the hbase:meta table's secondary regions. 0 means this feature is disabled. Secondary regions see new files (from flushes and compactions) from primary once the secondary region refreshes the list of files in the region (there is no notification mechanism). But too frequent refreshes might cause extra Namenode pressure. If the files cannot be refreshed for longer than HFile TTL (hbase.master.hfilecleaner.ttl) the requests are rejected. Configuring HFile TTL to a larger value is also recommended with this setting. This should be a non-zero number if meta replicas are enabled. @@ -2953,15 +2951,6 @@ Instead you can change the number of region replicas per table to increase or de The period (in milliseconds) to keep store files in the archive folder before deleting them from the file system. - - hbase.meta.replica.count - 3 - - Region replication count for the meta regions. Defaults to 1. - - - - hbase.region.replica.storefile.refresh.memstore.multiplier 4 From bc20203965c06e12f7061ec6c40221ccee220ae3 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Mon, 12 Oct 2020 19:18:47 -0700 Subject: [PATCH 131/769] "HBASE-25144 Add Hadoop-3.3.0 to personality hadoopcheck" try again (#2536) Due to HBASE-23834, HBASE-19256, HADOOP-16152, HBase 2.2.x and 2.3.x cannot run on Hadoop 3.3.0, or any Hadoop version that has upgraded to Jetty 9.4. Signed-off-by: Sean Busbey Signed-off-by: stack Signed-off-by: Guanghao Zhang Signed-off-by: Duo Zhang --- dev-support/hbase-personality.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 95bbd44bdadc..9b39ead6bc07 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -609,8 +609,15 @@ function hadoopcheck_rebuild else hbase_hadoop3_versions="3.0.3 3.1.1 3.1.2" fi + elif [[ "${PATCH_BRANCH}" = branch-2.2 ]] || [[ "${PATCH_BRANCH}" = branch-2.3 ]]; then + yetus_info "Setting Hadoop 3 versions to test based on branch-2.2/branch-2.3 rules" + if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then + hbase_hadoop3_versions="3.1.2 3.2.1" + else + hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1" + fi else - yetus_info "Setting Hadoop 3 versions to test based on branch-2.2+/master/feature branch rules" + yetus_info "Setting Hadoop 3 versions to test based on branch-2.4+/master/feature branch rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then hbase_hadoop3_versions="3.1.2 3.2.1 3.3.0" else From fd0ecadbb98a426ef0107d7b8299c60be84a548f Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Thu, 8 Oct 2020 10:02:27 -0500 Subject: [PATCH 132/769] HBASE-14067 bundle ruby files for hbase shell into a jar. 
* removes some cruft from the hbase-shell pom that appears to be from copying the hbase-server pom long ago * puts the ruby scripts into the hbase-shell jar following the guide from jruby for packaging * removes hard coding the location of the implementation scripts from our runtime * removes hard coding the load path for the implementation from the test code (leaves hard coding the test code location) * provides a workaround for a name conflict between our shell and the ruby stdlib shell. closes #2515 Signed-off-by: Michael Stack --- bin/hbase | 13 +- bin/hirb.rb | 223 +---------------- .../src/main/assembly/client-components.xml | 7 - .../src/main/assembly/components.xml | 7 - hbase-shell/pom.xml | 54 +--- hbase-shell/src/main/ruby/hbase_shell.rb | 24 ++ hbase-shell/src/main/ruby/jar-bootstrap.rb | 235 ++++++++++++++++++ .../hbase/client/AbstractTestShell.java | 38 ++- .../hadoop/hbase/client/TestAdminShell.java | 8 +- .../hadoop/hbase/client/TestAdminShell2.java | 8 +- .../hadoop/hbase/client/TestQuotasShell.java | 8 +- .../hadoop/hbase/client/TestRSGroupShell.java | 8 +- .../hbase/client/TestReplicationShell.java | 8 +- .../apache/hadoop/hbase/client/TestShell.java | 10 +- .../hbase/client/TestShellNoCluster.java | 13 +- .../hadoop/hbase/client/TestTableShell.java | 8 +- .../client/rsgroup/TestShellRSGroups.java | 8 +- .../src/test/ruby/hbase/admin2_test.rb | 2 +- hbase-shell/src/test/ruby/hbase/admin_test.rb | 2 +- .../hbase/list_regions_test_no_cluster.rb | 2 +- .../src/test/ruby/hbase/quotas_test.rb | 2 +- .../test/ruby/hbase/quotas_test_no_cluster.rb | 2 +- .../test/ruby/hbase/replication_admin_test.rb | 2 +- .../test/ruby/hbase/security_admin_test.rb | 2 +- .../ruby/hbase/test_connection_no_cluster.rb | 2 +- .../hbase/visibility_labels_admin_test.rb | 2 +- .../src/test/ruby/shell/commands_test.rb | 2 +- .../src/test/ruby/shell/converter_test.rb | 4 +- .../src/test/ruby/shell/list_locks_test.rb | 2 +- .../test/ruby/shell/list_procedures_test.rb | 2 +- .../test/ruby/shell/noninteractive_test.rb | 2 +- .../src/test/ruby/shell/rsgroup_shell_test.rb | 2 +- hbase-shell/src/test/ruby/shell/shell_test.rb | 2 +- hbase-shell/src/test/ruby/test_helper.rb | 6 +- hbase-shell/src/test/ruby/tests_runner.rb | 3 + 35 files changed, 369 insertions(+), 354 deletions(-) create mode 100644 hbase-shell/src/main/ruby/hbase_shell.rb create mode 100644 hbase-shell/src/main/ruby/jar-bootstrap.rb diff --git a/bin/hbase b/bin/hbase index dd6cfeef644f..d2307c50c33a 100755 --- a/bin/hbase +++ b/bin/hbase @@ -509,13 +509,22 @@ fi # figure out which class to run if [ "$COMMAND" = "shell" ] ; then #find the hbase ruby sources + # assume we are in a binary install if lib/ruby exists if [ -d "$HBASE_HOME/lib/ruby" ]; then - HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/lib/ruby" + # We want jruby to consume these things rather than our bootstrap script; + # jruby will look for the env variable 'JRUBY_OPTS'. 
+ JRUBY_OPTS="${JRUBY_OPTS} -X+O" + export JRUBY_OPTS + # hbase-shell.jar contains a 'jar-bootstrap.rb' + # for more info see + # https://github.com/jruby/jruby/wiki/StandaloneJarsAndClasses#standalone-executable-jar-files + CLASS="org.jruby.JarBootstrapMain" + # otherwise assume we are running in a source checkout else HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-shell/src/main/ruby" + CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/hbase-shell/src/main/ruby/jar-bootstrap.rb" fi HBASE_OPTS="$HBASE_OPTS $HBASE_SHELL_OPTS" - CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb" elif [ "$COMMAND" = "hbck" ] ; then # Look for the -j /path/to/HBCK2.jar parameter. Else pass through to hbck. case "${1}" in diff --git a/bin/hirb.rb b/bin/hirb.rb index 7b1b8f172c25..12353ca1a0ec 100644 --- a/bin/hirb.rb +++ b/bin/hirb.rb @@ -1,5 +1,3 @@ -# -# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -15,217 +13,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# File passed to org.jruby.Main by bin/hbase. Pollutes jirb with hbase imports -# and hbase commands and then loads jirb. Outputs a banner that tells user -# where to find help, shell version, and loads up a custom hirb. -# -# In noninteractive mode, runs commands from stdin until completion or an error. -# On success will exit with status 0, on any problem will exit non-zero. Callers -# should only rely on "not equal to 0", because the current error exit code of 1 -# will likely be updated to diffentiate e.g. invalid commands, incorrect args, -# permissions, etc. - -# TODO: Interrupt a table creation or a connection to a bad master. Currently -# has to time out. Below we've set down the retries for rpc and hbase but -# still can be annoying (And there seem to be times when we'll retry for -# ever regardless) -# TODO: Add support for listing and manipulating catalog tables, etc. -# TODO: Encoding; need to know how to go from ruby String to UTF-8 bytes - -# Run the java magic include and import basic HBase types that will help ease -# hbase hacking. -include Java - -# Some goodies for hirb. Should these be left up to the user's discretion? -require 'irb/completion' -require 'pathname' - -# Add the directory names in hbase.jruby.sources commandline option -# to the ruby load path so I can load up my HBase ruby modules -sources = java.lang.System.getProperty('hbase.ruby.sources') -$LOAD_PATH.unshift Pathname.new(sources) - -# -# FIXME: Switch args processing to getopt -# -# See if there are args for this shell. If any, read and then strip from ARGV -# so they don't go through to irb. 
Output shell 'usage' if user types '--help' -cmdline_help = <hbase-config.cmd - - - ${project.basedir}/../hbase-shell/src/main/ruby - lib/ruby - 0644 - 0755 - ${project.basedir}/../hbase-server/target/native diff --git a/hbase-assembly/src/main/assembly/components.xml b/hbase-assembly/src/main/assembly/components.xml index aaa6a831ad59..3e1394e7d5b1 100644 --- a/hbase-assembly/src/main/assembly/components.xml +++ b/hbase-assembly/src/main/assembly/components.xml @@ -69,13 +69,6 @@ **/*.cmd - - - ${project.basedir}/../hbase-shell/src/main/ruby - lib/ruby - 0644 - 0755 - ${project.basedir}/../hbase-server/target/hbase-webapps diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index b6d2e30388a8..63db97719c67 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -30,15 +30,9 @@ Apache HBase - Shell Shell for HBase - - - ${project.build.directory} - - hbase-webapps/** - + src/main/ruby @@ -50,38 +44,15 @@ - - - - maven-assembly-plugin - - true - - org.apache.maven.plugins maven-jar-plugin - org/apache/hadoop/hbase/mapreduce/Driver + org.jruby.JarBootstrapMain - - - org/apache/jute/** - org/apache/zookeeper/** - **/*.jsp - hbase-site.xml - hdfs-site.xml - log4j.properties - mapred-queues.xml - mapred-site.xml - @@ -89,27 +60,6 @@ org.apache.maven.plugins maven-source-plugin - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-jamon - ${project.build.directory}/generated-sources/java - - - - - net.revelc.code diff --git a/hbase-shell/src/main/ruby/hbase_shell.rb b/hbase-shell/src/main/ruby/hbase_shell.rb new file mode 100644 index 000000000000..e5e85ab68208 --- /dev/null +++ b/hbase-shell/src/main/ruby/hbase_shell.rb @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Ruby has a stdlib named 'shell' so using "require 'shell'" does not +# work if our shell implementation is not on the local filesystem. +# this is the absolute path to our shell implementation when packaged +# in a jar. The level of indirection provided by this file lets things +# still behave the same as in earlier releases if folks unpackage the +# jar contents onto the local filesystem if they need that for some +# other reason. +require 'uri:classloader:/shell.rb' diff --git a/hbase-shell/src/main/ruby/jar-bootstrap.rb b/hbase-shell/src/main/ruby/jar-bootstrap.rb new file mode 100644 index 000000000000..de602bf551d1 --- /dev/null +++ b/hbase-shell/src/main/ruby/jar-bootstrap.rb @@ -0,0 +1,235 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File passed to org.jruby.Main by bin/hbase. Pollutes jirb with hbase imports +# and hbase commands and then loads jirb. Outputs a banner that tells user +# where to find help, shell version, and loads up a custom hirb. +# +# In noninteractive mode, runs commands from stdin until completion or an error. +# On success will exit with status 0, on any problem will exit non-zero. Callers +# should only rely on "not equal to 0", because the current error exit code of 1 +# will likely be updated to diffentiate e.g. invalid commands, incorrect args, +# permissions, etc. + +# TODO: Interrupt a table creation or a connection to a bad master. Currently +# has to time out. Below we've set down the retries for rpc and hbase but +# still can be annoying (And there seem to be times when we'll retry for +# ever regardless) +# TODO: Add support for listing and manipulating catalog tables, etc. +# TODO: Encoding; need to know how to go from ruby String to UTF-8 bytes + +# Run the java magic include and import basic HBase types that will help ease +# hbase hacking. +include Java + +# Some goodies for hirb. Should these be left up to the user's discretion? +require 'irb/completion' +require 'pathname' + +# Add the directory names in hbase.jruby.sources commandline option +# to the ruby load path so I can load up my HBase ruby modules +# in case we are trying to get them out of source instead of jar +# packaging. +sources = java.lang.System.getProperty('hbase.ruby.sources') +unless sources.nil? + $LOAD_PATH.unshift Pathname.new(sources) +end + +# +# FIXME: Switch args processing to getopt +# +# See if there are args for this shell. If any, read and then strip from ARGV +# so they don't go through to irb. Output shell 'usage' if user types '--help' +cmdline_help = < loadPaths = new ArrayList<>(2); - loadPaths.add("src/main/ruby"); loadPaths.add("src/test/ruby"); jruby.setLoadPaths(loadPaths); jruby.put("$TEST_CLUSTER", TEST_UTIL); @@ -65,6 +71,34 @@ protected static void setUpJRubyRuntime() { System.setProperty("jruby.native.verbose", "true"); } + /** + * @return comma separated list of ruby script names for tests + */ + protected String getIncludeList() { + return ""; + } + + /** + * @return comma separated list of ruby script names for tests to skip + */ + protected String getExcludeList() { + return ""; + } + + @Test + public void testRunShellTests() throws IOException { + final String tests = getIncludeList(); + final String excludes = getExcludeList(); + if (!tests.isEmpty()) { + System.setProperty("shell.test.include", tests); + } + if (!excludes.isEmpty()) { + System.setProperty("shell.test.exclude", excludes); + } + LOG.info("Starting ruby tests. 
includes: {} excludes: {}", tests, excludes); + jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + } + @BeforeClass public static void setUpBeforeClass() throws Exception { setUpConfig(); diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java index 1835d88dc5cf..7cfd603ddb8e 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java @@ -33,10 +33,8 @@ public class TestAdminShell extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdminShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "admin_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "admin_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell2.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell2.java index e2dadd059346..b94a579ea322 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell2.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell2.java @@ -33,10 +33,8 @@ public class TestAdminShell2 extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdminShell2.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "admin2_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "admin2_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestQuotasShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestQuotasShell.java index 482bf0f46bf6..f2bb06f7369d 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestQuotasShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestQuotasShell.java @@ -33,10 +33,8 @@ public class TestQuotasShell extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestQuotasShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "quotas_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "quotas_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestRSGroupShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestRSGroupShell.java index f26f9f53375b..a2bc6a47c120 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestRSGroupShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestRSGroupShell.java @@ -47,10 +47,8 @@ public static void setUpBeforeClass() throws Exception { setUpJRubyRuntime(); } - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "rsgroup_shell_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String 
getIncludeList() { + return "rsgroup_shell_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java index ca371e145b98..146a73fa0e33 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java @@ -33,10 +33,8 @@ public class TestReplicationShell extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestReplicationShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "replication_admin_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "replication_admin_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java index 8f3aefba1808..434d8cf6709b 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java @@ -32,11 +32,9 @@ public class TestShell extends AbstractTestShell { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.exclude", "replication_admin_test.rb,rsgroup_shell_test.rb," + - "admin_test.rb,table_test.rb,quotas_test.rb,admin2_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getExcludeList() { + return "replication_admin_test.rb,rsgroup_shell_test.rb,admin_test.rb,table_test.rb," + + "quotas_test.rb,admin2_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java index 3172e973b76c..1bea652923ae 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java @@ -30,8 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @Category({ ClientTests.class, MediumTests.class }) public class TestShellNoCluster extends AbstractTestShell { + private static final Logger LOG = LoggerFactory.getLogger(TestShellNoCluster.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -41,7 +45,6 @@ public class TestShellNoCluster extends AbstractTestShell { public static void setUpBeforeClass() throws Exception { // no cluster List loadPaths = new ArrayList<>(2); - loadPaths.add("src/main/ruby"); loadPaths.add("src/test/ruby"); jruby.setLoadPaths(loadPaths); jruby.put("$TEST_CLUSTER", TEST_UTIL); @@ -55,9 +58,11 @@ public static void tearDownAfterClass() throws Exception { // no cluster } + // Keep the same name so we override the with-a-cluster test + @Override @Test - public void testRunNoClusterShellTests() throws IOException { - // Start ruby tests without cluster - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/no_cluster_tests_runner.rb"); + public void testRunShellTests() throws IOException 
{ + LOG.info("Start ruby tests without cluster"); + jruby.runScriptlet(PathType.CLASSPATH, "no_cluster_tests_runner.rb"); } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java index e2fdcaa67fcd..26369349e1e5 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java @@ -33,10 +33,8 @@ public class TestTableShell extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestTableShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "table_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "test_table.rb"; } } diff --git a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java index 9c28cbf5b75b..380ad6163228 100644 --- a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java +++ b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java @@ -52,11 +52,9 @@ public class TestShellRSGroups { final Logger LOG = LoggerFactory.getLogger(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final static ScriptingContainer jruby = new ScriptingContainer(); - private static String basePath; @BeforeClass public static void setUpBeforeClass() throws Exception { - basePath = System.getProperty("basedir"); // Start mini cluster TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); @@ -80,8 +78,7 @@ public static void setUpBeforeClass() throws Exception { // Configure jruby runtime List loadPaths = new ArrayList<>(2); - loadPaths.add(basePath+"/src/main/ruby"); - loadPaths.add(basePath+"/src/test/ruby"); + loadPaths.add("src/test/ruby"); jruby.setLoadPaths(loadPaths); jruby.put("$TEST_CLUSTER", TEST_UTIL); System.setProperty("jruby.jit.logging.verbose", "true"); @@ -99,8 +96,7 @@ public void testRunShellTests() throws IOException { try { // Start only GroupShellTest System.setProperty("shell.test", "Hbase::RSGroupShellTest"); - jruby.runScriptlet(PathType.ABSOLUTE, - basePath + "/src/test/ruby/tests_runner.rb"); + jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); } finally { System.clearProperty("shell.test"); } diff --git a/hbase-shell/src/test/ruby/hbase/admin2_test.rb b/hbase-shell/src/test/ruby/hbase/admin2_test.rb index 9d3834ee667f..8d368188ae18 100644 --- a/hbase-shell/src/test/ruby/hbase/admin2_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin2_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index 65e3e0a6a2d6..fac52ede51b7 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -17,7 +17,7 @@ # limitations under the License. 
# -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/list_regions_test_no_cluster.rb b/hbase-shell/src/test/ruby/hbase/list_regions_test_no_cluster.rb index 6be259779d95..75a3c0ef252f 100644 --- a/hbase-shell/src/test/ruby/hbase/list_regions_test_no_cluster.rb +++ b/hbase-shell/src/test/ruby/hbase/list_regions_test_no_cluster.rb @@ -15,7 +15,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'hbase_constants' java_import 'org.apache.hadoop.hbase.HRegionLocation' diff --git a/hbase-shell/src/test/ruby/hbase/quotas_test.rb b/hbase-shell/src/test/ruby/hbase/quotas_test.rb index c4fca28bdfdc..6e506c52f14a 100644 --- a/hbase-shell/src/test/ruby/hbase/quotas_test.rb +++ b/hbase-shell/src/test/ruby/hbase/quotas_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/quotas_test_no_cluster.rb b/hbase-shell/src/test/ruby/hbase/quotas_test_no_cluster.rb index 79f735021a8a..471a81013dd8 100644 --- a/hbase-shell/src/test/ruby/hbase/quotas_test_no_cluster.rb +++ b/hbase-shell/src/test/ruby/hbase/quotas_test_no_cluster.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb index 72fbe943040f..c6ed817ad4ea 100644 --- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' diff --git a/hbase-shell/src/test/ruby/hbase/security_admin_test.rb b/hbase-shell/src/test/ruby/hbase/security_admin_test.rb index 6e9a50cafcd1..8839c33dabce 100644 --- a/hbase-shell/src/test/ruby/hbase/security_admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/security_admin_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' diff --git a/hbase-shell/src/test/ruby/hbase/test_connection_no_cluster.rb b/hbase-shell/src/test/ruby/hbase/test_connection_no_cluster.rb index 361937634c3c..6969a3613e44 100644 --- a/hbase-shell/src/test/ruby/hbase/test_connection_no_cluster.rb +++ b/hbase-shell/src/test/ruby/hbase/test_connection_no_cluster.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb index e69710d69a53..b59b9b9065fa 100644 --- a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb @@ -17,7 +17,7 @@ # limitations under the License. 
# -require 'shell' +require 'hbase_shell' require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' diff --git a/hbase-shell/src/test/ruby/shell/commands_test.rb b/hbase-shell/src/test/ruby/shell/commands_test.rb index 0fc3dab4110f..c97931ff20f8 100644 --- a/hbase-shell/src/test/ruby/shell/commands_test.rb +++ b/hbase-shell/src/test/ruby/shell/commands_test.rb @@ -19,7 +19,7 @@ require 'hbase_constants' require 'hbase/table' -require 'shell' +require 'hbase_shell' ## # Tests whether all registered commands have a help and command method diff --git a/hbase-shell/src/test/ruby/shell/converter_test.rb b/hbase-shell/src/test/ruby/shell/converter_test.rb index 51e674093f25..34999ea00715 100644 --- a/hbase-shell/src/test/ruby/shell/converter_test.rb +++ b/hbase-shell/src/test/ruby/shell/converter_test.rb @@ -15,7 +15,7 @@ # limitations under the License. require 'hbase_constants' -require 'shell' +require 'hbase_shell' module Hbase class ConverterTest < Test::Unit::TestCase @@ -153,4 +153,4 @@ def teardown assert(!output.include?(hex_column)) end end -end \ No newline at end of file +end diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb b/hbase-shell/src/test/ruby/shell/list_locks_test.rb index 6d291a5000fd..20a910c485dd 100644 --- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb +++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb @@ -18,7 +18,7 @@ # require 'hbase_constants' -require 'shell' +require 'hbase_shell' module Hbase class ListLocksTest < Test::Unit::TestCase diff --git a/hbase-shell/src/test/ruby/shell/list_procedures_test.rb b/hbase-shell/src/test/ruby/shell/list_procedures_test.rb index 2bf5824c0ee3..a9a38fe9e734 100644 --- a/hbase-shell/src/test/ruby/shell/list_procedures_test.rb +++ b/hbase-shell/src/test/ruby/shell/list_procedures_test.rb @@ -18,7 +18,7 @@ # require 'hbase_constants' -require 'shell' +require 'hbase_shell' module Hbase class ListProceduresTest < Test::Unit::TestCase diff --git a/hbase-shell/src/test/ruby/shell/noninteractive_test.rb b/hbase-shell/src/test/ruby/shell/noninteractive_test.rb index 0fae4cbb79a7..fa8dd333be0d 100644 --- a/hbase-shell/src/test/ruby/shell/noninteractive_test.rb +++ b/hbase-shell/src/test/ruby/shell/noninteractive_test.rb @@ -15,7 +15,7 @@ # limitations under the License. 
# require 'hbase_constants' -require 'shell' +require 'hbase_shell' class NonInteractiveTest < Test::Unit::TestCase def setup diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb index 33a6c498bec7..e8ba851b20b8 100644 --- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb +++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb @@ -18,7 +18,7 @@ # require 'hbase_constants' -require 'shell' +require 'hbase_shell' module Hbase class RSGroupShellTest < Test::Unit::TestCase diff --git a/hbase-shell/src/test/ruby/shell/shell_test.rb b/hbase-shell/src/test/ruby/shell/shell_test.rb index 9be6bfba1414..b16aef385408 100644 --- a/hbase-shell/src/test/ruby/shell/shell_test.rb +++ b/hbase-shell/src/test/ruby/shell/shell_test.rb @@ -18,7 +18,7 @@ # require 'hbase_constants' -require 'shell' +require 'hbase_shell' class ShellTest < Test::Unit::TestCase include Hbase::TestHelpers diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb index 26b142638f04..db014f502787 100644 --- a/hbase-shell/src/test/ruby/test_helper.rb +++ b/hbase-shell/src/test/ruby/test_helper.rb @@ -39,7 +39,7 @@ module Hbase module TestHelpers require 'hbase_constants' require 'hbase/hbase' - require 'shell' + require 'hbase_shell' def setup_hbase hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) @@ -169,7 +169,3 @@ def capture_stdout # Extend standard unit tests with our helpers Test::Unit::TestCase.extend(Testing::Declarative) - -# Add the $HBASE_HOME/lib/ruby directory to the ruby -# load path so I can load up my HBase ruby modules -$LOAD_PATH.unshift File.join(File.dirname(__FILE__), "..", "..", "main", "ruby") diff --git a/hbase-shell/src/test/ruby/tests_runner.rb b/hbase-shell/src/test/ruby/tests_runner.rb index b0a0aaf17636..147d68103f5e 100644 --- a/hbase-shell/src/test/ruby/tests_runner.rb +++ b/hbase-shell/src/test/ruby/tests_runner.rb @@ -21,6 +21,8 @@ require 'rake' require 'set' +puts "Ruby description: #{RUBY_DESCRIPTION}" + unless defined?($TEST_CLUSTER) include Java @@ -68,6 +70,7 @@ next end begin + puts "loading test file '#{filename}'." 
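+        # printing the name before the load makes it possible to attribute a
+        # hang or crash inside the JRuby runtime to a specific test script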
load(file) rescue => e puts "ERROR: #{e}" From 78b7244091f294d7e2f59a563d34dac7cf722cd7 Mon Sep 17 00:00:00 2001 From: XinSun Date: Thu, 15 Oct 2020 01:08:54 +0800 Subject: [PATCH 133/769] HBASE-25117 ReplicationSourceShipper thread can not be finished (#2521) Signed-off-by: Wellington Chevreuil Signed-off-by: stack Signed-off-by: Guanghao Zhang Signed-off-by: Duo Zhang --- .../regionserver/HBaseInterClusterReplicationEndpoint.java | 5 +++-- .../hbase/replication/regionserver/ReplicationSource.java | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index b127b467505d..56bccc09cdc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -193,7 +193,7 @@ private void connectToPeers() { * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ - protected boolean sleepForRetries(String msg, int sleepMultiplier) { + private boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { LOG.trace("{} {}, sleeping {} times {}", @@ -201,8 +201,9 @@ protected boolean sleepForRetries(String msg, int sleepMultiplier) { } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { + Thread.currentThread().interrupt(); if (LOG.isDebugEnabled()) { - LOG.debug("{} Interrupted while sleeping between retries", logPeerId()); + LOG.debug("{} {} Interrupted while sleeping between retries", msg, logPeerId()); } } return sleepMultiplier < maxRetriesMultiplier; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index b68e0587d7de..8091d0ce71f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -691,6 +691,9 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, } } + if (this.replicationEndpoint != null) { + this.replicationEndpoint.stop(); + } for (ReplicationSourceShipper worker : workers) { if (worker.isAlive() || worker.entryReader.isAlive()) { try { @@ -711,9 +714,6 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, } } - if (this.replicationEndpoint != null) { - this.replicationEndpoint.stop(); - } if (join) { for (ReplicationSourceShipper worker : workers) { Threads.shutdown(worker, this.sleepForRetries); From 38ac7e26ae0e3dacdeba23e1e9d2215ee2649849 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 15 Oct 2020 10:22:14 +0800 Subject: [PATCH 134/769] HBASE-25186 TestMasterRegionOnTwoFileSystems is failing after HBASE-25065 (#2544) Signed-off-by: Ramkrishna --- .../hbase/master/region/MasterRegionWALRoller.java | 6 ++++-- .../apache/hadoop/hbase/wal/AbstractWALRoller.java | 12 +++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java index ef3dd121133b..bba6611c68d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.AbstractWALRoller; -import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -68,8 +67,11 @@ private MasterRegionWALRoller(String name, Configuration conf, Abortable abortab } @Override - protected void afterRoll(WAL wal) { + protected void afterWALArchive(Path oldPath, Path newPath) { // move the archived WAL files to the global archive path + // here we do not use the newPath directly, so that even if we fail to move some of the + // newPaths, we are still safe because every time we will get all the files under the archive + // directory. try { MasterRegionUtils.moveFilesUnderDir(fs, walArchiveDir, globalWALArchiveDir, archivedWALSuffix); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index a5a0ee3a3225..4d89c4753844 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; @@ -86,6 +87,11 @@ public void logRollRequested(WALActionsListener.RollRequestReason reason) { AbstractWALRoller.this.notifyAll(); } } + + @Override + public void postLogArchive(Path oldPath, Path newPath) throws IOException { + afterWALArchive(oldPath, newPath); + } }); } } @@ -190,7 +196,6 @@ public void run() { LOG.warn("WAL has been closed. Skipping rolling of writer and just remove it", e); iter.remove(); } - afterRoll(wal); } } catch (FailedLogCloseException | ConnectException e) { abort("Failed log close in log roller", e); @@ -206,10 +211,7 @@ public void run() { LOG.info("LogRoller exiting."); } - /** - * Called after we finish rolling the give {@code wal}. 
- */ - protected void afterRoll(WAL wal) { + protected void afterWALArchive(Path oldPath, Path newPath) { } /** From 70d2f37ee38bd89f00279d319ae99bd76108fe2d Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 15 Oct 2020 07:02:53 -0700 Subject: [PATCH 135/769] HBASE-24628 Region normalizer now respects a rate limit (HMaster chore shutdown NPE ADDENDUM) (#2540) Signed-off-by: Michael Stack Signed-off-by: Duo Zhang --- .../main/java/org/apache/hadoop/hbase/master/HMaster.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 85ac5e0b0490..f58096fa5407 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1123,7 +1123,9 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc getChoreService().scheduleChore(clusterStatusChore); this.balancerChore = new BalancerChore(this); getChoreService().scheduleChore(balancerChore); - getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore()); + if (regionNormalizerManager != null) { + getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore()); + } this.catalogJanitorChore = new CatalogJanitor(this); getChoreService().scheduleChore(catalogJanitorChore); this.hbckChore = new HbckChore(this); @@ -1638,7 +1640,9 @@ private void stopChores() { choreService.cancelChore(this.mobFileCleanerChore); choreService.cancelChore(this.mobFileCompactionChore); choreService.cancelChore(this.balancerChore); - choreService.cancelChore(getRegionNormalizerManager().getRegionNormalizerChore()); + if (regionNormalizerManager != null) { + choreService.cancelChore(regionNormalizerManager.getRegionNormalizerChore()); + } choreService.cancelChore(this.clusterStatusChore); choreService.cancelChore(this.catalogJanitorChore); choreService.cancelChore(this.clusterStatusPublisherChore); From e6c78f40377dbf672c09761f6733fb4b496b5a3d Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Fri, 16 Oct 2020 12:04:10 +0530 Subject: [PATCH 136/769] HBASE-25179 : Fix Assert format in HFilePerformanceEvaluation class Closes #2551 Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/HFilePerformanceEvaluation.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index 2c4209ce8db8..cbfadb5d22e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -447,7 +447,7 @@ void doRow(int i) throws Exception { // TODO: Fix. Make Scanner do Cells. 
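      // PerformanceEvaluationCommons.assertValueSize takes the expected size as its
      // first argument; the calls below had (actual, expected) swapped, so assertion
      // failure messages reported the two values in reverse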
Cell c = this.scanner.getCell(); PerformanceEvaluationCommons.assertKey(format(i + 1), c); - PerformanceEvaluationCommons.assertValueSize(c.getValueLength(), ROW_LENGTH); + PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } } @@ -478,7 +478,7 @@ void doRow(int i) throws Exception { // TODO: Fix scanner so it does Cells Cell c = scanner.getCell(); PerformanceEvaluationCommons.assertKey(b, c); - PerformanceEvaluationCommons.assertValueSize(c.getValueLength(), ROW_LENGTH); + PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } private byte [] getRandomRow() { @@ -515,7 +515,7 @@ void doRow(int i) throws Exception { return; } c = scanner.getCell(); - PerformanceEvaluationCommons.assertValueSize(c.getValueLength(), ROW_LENGTH); + PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } } From 90052ccf1bcd5ddfcc65a03a7b95f6ec2cadcdf2 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 20 Oct 2020 11:51:34 +0800 Subject: [PATCH 137/769] HBASE-25194 Do not publish workspace in flaky find job (#2564) Signed-off-by: Sean Busbey --- .../flaky-tests/flaky-reporting.Jenkinsfile | 7 ++++--- dev-support/flaky-tests/report-flakies.py | 17 ++++++++++++----- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 640b1cb54a77..25e3fdeef841 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -43,7 +43,8 @@ pipeline { flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase%20Nightly/job/${BRANCH_NAME}" --is-yetus True --max-builds 10) flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase-Flaky-Tests/job/${BRANCH_NAME}" --is-yetus False --max-builds 30) docker build -t hbase-dev-support dev-support - docker run --ulimit nproc=12500 -v "${WORKSPACE}":/hbase --workdir=/hbase hbase-dev-support python dev-support/flaky-tests/report-flakies.py --mvn -v "${flaky_args[@]}" + docker run --ulimit nproc=12500 -v "${WORKSPACE}":/hbase -u `id -u`:`id -g` --workdir=/hbase hbase-dev-support \ + python dev-support/flaky-tests/report-flakies.py --mvn -v -o output "${flaky_args[@]}" ''' } } @@ -51,13 +52,13 @@ pipeline { post { always { // Has to be relative to WORKSPACE. - archiveArtifacts artifacts: "includes,excludes,dashboard.html" + archiveArtifacts artifacts: "output/*" publishHTML target: [ allowMissing: true, keepAll: true, alwaysLinkToLastBuild: true, // Has to be relative to WORKSPACE - reportDir: ".", + reportDir: "output", reportFiles: 'dashboard.html', reportName: 'Flaky Test Report' ] diff --git a/dev-support/flaky-tests/report-flakies.py b/dev-support/flaky-tests/report-flakies.py index 1b3161af6d83..d29ecfa4da6e 100755 --- a/dev-support/flaky-tests/report-flakies.py +++ b/dev-support/flaky-tests/report-flakies.py @@ -60,6 +60,8 @@ "strings are written to files so they can be saved as artifacts and easily imported in " "other projects. Also writes timeout and failing tests in separate files for " "reference.") +parser.add_argument("-o", "--output", metavar='dir', action='store', required=False, + help="the output directory") parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true") args = parser.parse_args() @@ -68,6 +70,11 @@ if args.verbose: logger.setLevel(logging.INFO) +output_dir = '.' 
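+# fall back to the current working directory when -o/--output is not given,
+# preserving the old behaviour of writing includes/excludes/dashboard.html in place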
+if args.output is not None: + output_dir = args.output + if not os.path.exists(output_dir): + os.makedirs(output_dir) def get_bad_tests(build_url, is_yetus): """ @@ -257,24 +264,24 @@ def expand_multi_config_projects(cli_args): all_bad_tests = all_hanging_tests.union(all_failed_tests) if args.mvn: includes = ",".join(all_bad_tests) - with open("./includes", "w") as inc_file: + with open(output_dir + "/includes", "w") as inc_file: inc_file.write(includes) excludes = ["**/{0}.java".format(bad_test) for bad_test in all_bad_tests] - with open("./excludes", "w") as exc_file: + with open(output_dir + "/excludes", "w") as exc_file: exc_file.write(",".join(excludes)) - with open("./timeout", "w") as timeout_file: + with open(output_dir + "/timeout", "w") as timeout_file: timeout_file.write(",".join(all_timeout_tests)) - with open("./failed", "w") as failed_file: + with open(output_dir + "/failed", "w") as failed_file: failed_file.write(",".join(all_failed_tests)) dev_support_dir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f: template = Template(f.read()) -with open("dashboard.html", "w") as f: +with open(output_dir + "/dashboard.html", "w") as f: datetime = time.strftime("%m/%d/%Y %H:%M:%S") f.write(template.render(datetime=datetime, bad_tests_count=len(all_bad_tests), results=url_to_bad_test_results, build_ids=url_to_build_ids)) From 7d00e43dce576387f16e87fc7f89dd03358cc7a1 Mon Sep 17 00:00:00 2001 From: Ankit Singhal Date: Mon, 19 Oct 2020 22:22:33 -0700 Subject: [PATCH 138/769] HBASE-25166 MobFileCompactionChore is closing the master's shared cluster connection (#2514) --- .../org/apache/hadoop/hbase/mob/MobFileCompactionChore.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java index 314729833959..dd5d2898ea01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.CompactionState; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; @@ -85,9 +84,7 @@ protected void chore() { boolean reported = false; - try (Connection conn = master.getConnection(); - Admin admin = conn.getAdmin();) { - + try (Admin admin = master.getConnection().getAdmin()) { TableDescriptors htds = master.getTableDescriptors(); Map map = htds.getAll(); for (TableDescriptor htd : map.values()) { From 28f2c176ec229814ef7d2cd206c803408d78e9b3 Mon Sep 17 00:00:00 2001 From: Guanghao Zhang Date: Tue, 20 Oct 2020 16:58:43 +0800 Subject: [PATCH 139/769] HBASE-25204 Nightly job failed as the name of jdk and maven changed (#2567) Signed-off-by: Duo Zhang --- dev-support/Jenkinsfile | 4 ++-- dev-support/adhoc_run_tests/Jenkinsfile | 4 ++-- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 01d50197fbc5..94535b5b443b 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -593,9 +593,9 @@ 
pipeline { // TODO (HBASE-23870): replace this with invocation of the release tool stage ('packaging and integration') { tools { - maven 'Maven (latest)' + maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in. - jdk "JDK 1.8 (latest)" + jdk "jdk_1.8_latest" } environment { BASEDIR = "${env.WORKSPACE}/component" diff --git a/dev-support/adhoc_run_tests/Jenkinsfile b/dev-support/adhoc_run_tests/Jenkinsfile index e06fdba325a6..476795d50ca8 100644 --- a/dev-support/adhoc_run_tests/Jenkinsfile +++ b/dev-support/adhoc_run_tests/Jenkinsfile @@ -51,10 +51,10 @@ pipeline { stages { stage ('run tests') { tools { - maven 'Maven (latest)' + maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch // the Jenkinsfile is stored in. - jdk "JDK 1.8 (latest)" + jdk "jdk_1.8_latest" } steps { sh """#!/bin/bash -e diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index 959ae31a0767..a6996bf8bf07 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -34,8 +34,8 @@ pipeline { } tools { // this should match what the yetus nightly job for the branch will use - maven 'Maven (latest)' - jdk "JDK 1.8 (latest)" + maven 'maven_latest' + jdk "jdk_1.8_latest" } stages { stage ('run flaky tests') { From 200343c59c9042985ccf1106a198385fac58866e Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 20 Oct 2020 17:00:06 +0800 Subject: [PATCH 140/769] HBASE-25203 Change the reference url to flaky list in our jenkins jobs (#2566) Signed-off-by: Guanghao Zhang --- dev-support/Jenkinsfile | 2 +- dev-support/Jenkinsfile_GitHub | 2 +- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 2 +- dev-support/hbase-personality.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 94535b5b443b..9f23a58873cd 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -49,7 +49,7 @@ pipeline { ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' - EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes" + EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/excludes" // TODO does hadoopcheck need to be jdk specific? SHALLOW_CHECKS = 'all,-shadedjars,-unit' // run by the 'yetus general check' DEEP_CHECKS = 'compile,htmlout,javac,maven,mvninstall,shadedjars,unit' // run by 'yetus jdkX (HadoopY) checks' diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index d314ba45cd9c..a15ee9e84957 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -44,7 +44,7 @@ pipeline { ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. 
TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' - EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${CHANGE_TARGET}/lastSuccessfulBuild/artifact/excludes" + EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${CHANGE_TARGET}/lastSuccessfulBuild/artifact/output/excludes" // a global view of paths. parallel stages can land on the same host concurrently, so each // stage works in its own subdirectory. there is an "output" under each of these diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index a6996bf8bf07..0ba200ba07f2 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -49,7 +49,7 @@ pipeline { mvn_args=("${mvn_args[@]}" -X) set -x fi - curl "${curl_args[@]}" -o includes.txt "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/includes" + curl "${curl_args[@]}" -o includes.txt "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/includes" if [ -s includes.txt ]; then rm -rf local-repository/org/apache/hbase mvn clean "${mvn_args[@]}" diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 9b39ead6bc07..2eee06e1e9ba 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -318,7 +318,7 @@ function get_include_exclude_tests_arg fi else # Use branch specific exclude list when EXCLUDE_TESTS_URL and INCLUDE_TESTS_URL are empty - FLAKY_URL="https://ci-hadoop.apache.org/job/HBase/job/HBase-Find-Flaky-Tests/job/${PATCH_BRANCH}/lastSuccessfulBuild/artifact/excludes/" + FLAKY_URL="https://ci-hadoop.apache.org/job/HBase/job/HBase-Find-Flaky-Tests/job/${PATCH_BRANCH}/lastSuccessfulBuild/artifact/output/excludes" if wget "${FLAKY_URL}" -O "excludes"; then excludes=$(cat excludes) yetus_debug "excludes=${excludes}" From 6c8f5f30d5e5402f99834fd8a3b75736fef27cd4 Mon Sep 17 00:00:00 2001 From: Jan Hentschel Date: Wed, 21 Oct 2020 09:18:35 +0200 Subject: [PATCH 141/769] HBASE-25196 Add deprecation documentation to HConstants (#2559) Add the documentation when HConstants#REPLICATION_DROP_ON_DELETED_TABLE_KEY was deprecated and when it is expected to be removed. Signed-off-by: Duo Zhang --- .../src/main/java/org/apache/hadoop/hbase/HConstants.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 41bf487de055..e1d3de9d513b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1358,7 +1358,9 @@ public enum OperationStatusCode { /** * Drop edits for tables that been deleted from the replication source and target - * @deprecated moved it into HBaseInterClusterReplicationEndpoint + * @deprecated since 3.0.0. Will be removed in 4.0.0. + * Moved it into HBaseInterClusterReplicationEndpoint. 
+ * @see HBASE-24359 */ @Deprecated public static final String REPLICATION_DROP_ON_DELETED_TABLE_KEY = From 1b58af1841d7d38b47871f9bd99b6b851fd528a9 Mon Sep 17 00:00:00 2001 From: Jan Hentschel Date: Wed, 21 Oct 2020 15:40:45 +0200 Subject: [PATCH 142/769] HBASE-25198 Remove deprecated RpcSchedulerFactory#create (#2561) Remove the deprecated RpcSchedulerFactory#create(Configuration, PriorityFunction) method from the interface and in all implementing classes. Signed-off-by: Duo Zhang --- .../hbase/regionserver/FifoRpcSchedulerFactory.java | 6 ------ .../hadoop/hbase/regionserver/RpcSchedulerFactory.java | 7 ------- .../hbase/regionserver/SimpleRpcSchedulerFactory.java | 10 ---------- 3 files changed, 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java index c77de648f4e9..12896a2d54ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java @@ -38,10 +38,4 @@ public RpcScheduler create(Configuration conf, PriorityFunction priority, Aborta HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); return new FifoRpcScheduler(conf, handlerCount); } - - @Deprecated - @Override - public RpcScheduler create(Configuration conf, PriorityFunction priority) { - return create(conf, priority, null); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java index dbd393db9884..d1d1cfc52942 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java @@ -35,11 +35,4 @@ public interface RpcSchedulerFactory { * Constructs a {@link org.apache.hadoop.hbase.ipc.RpcScheduler}. */ RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server); - - /** - * @deprecated since 1.0.0. - * @see HBASE-12028 - */ - @Deprecated - RpcScheduler create(Configuration conf, PriorityFunction priority); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java index 22a9da548d6f..06b004321c55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java @@ -32,16 +32,6 @@ @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) @InterfaceStability.Evolving public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory { - /** - * @deprecated since 1.0.0. 
- * @see HBASE-12028 - */ - @Override - @Deprecated - public RpcScheduler create(Configuration conf, PriorityFunction priority) { - return create(conf, priority, null); - } - @Override public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) { int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, From 2cd5f914af38e13597209f7d53d38f77c653545b Mon Sep 17 00:00:00 2001 From: Jan Hentschel Date: Wed, 21 Oct 2020 19:04:17 +0200 Subject: [PATCH 143/769] HBASE-25197 Remove SingletonCoprocessorService interface (#2560) Remove the SingletonCoprocessorService interface targeted for removal in 3.0.0. Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../SingletonCoprocessorService.java | 37 ------------------- 1 file changed, 37 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java deleted file mode 100644 index 2b2aedee22c9..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.coprocessor; - -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; - -import org.apache.hbase.thirdparty.com.google.protobuf.Service; - -/** - * Coprocessor endpoints registered once per server and providing protobuf services should implement - * this interface and return the {@link Service} instance via {@link #getService()}. - * @deprecated Since 2.0. Will be removed in 3.0 - */ -@Deprecated -@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -@InterfaceStability.Evolving -public interface SingletonCoprocessorService { - Service getService(); -} From 74df1e1abafffb667d5025129d18fd37eeb24667 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 22 Oct 2020 21:50:43 +0800 Subject: [PATCH 144/769] HBASE-25207 Revisit the implementation and usage of RegionStates.include (#2571) Remove the RegionStates.include method as its name is ambiguous. Add more comments to describe the logic on why we filter region like this. 
Signed-off-by: Toshihiro Suzuki --- .../apache/hadoop/hbase/master/HMaster.java | 2 +- .../master/assignment/AssignmentManager.java | 17 +++++++++-- .../hbase/master/assignment/RegionStates.java | 28 ++++++++++++------- .../procedure/EnableTableProcedure.java | 4 +-- .../master/assignment/TestRegionStates.java | 23 ++++++--------- 5 files changed, 45 insertions(+), 29 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index f58096fa5407..575feae4c75f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -3824,7 +3824,7 @@ public CompactionState getCompactionState(final TableName tableName) { CompactionState compactionState = CompactionState.NONE; try { List regions = - assignmentManager.getRegionStates().getRegionsOfTable(tableName, false); + assignmentManager.getRegionStates().getRegionsOfTable(tableName); for (RegionInfo regionInfo : regions) { ServerName serverName = assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 9a88533f3565..49f1eb1fb567 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -883,8 +883,13 @@ private TransitRegionStateProcedure[] createAssignProcedures( private TransitRegionStateProcedure forceCreateUnssignProcedure(RegionStateNode regionNode) { regionNode.lock(); try { - if (!regionStates.include(regionNode, false) || - regionStates.isRegionOffline(regionNode.getRegionInfo())) { + if (regionNode.isInState(State.OFFLINE, State.CLOSED, State.SPLIT)) { + return null; + } + // in general, a split parent should be in CLOSED or SPLIT state, but anyway, let's check it + // here for safety + if (regionNode.getRegionInfo().isSplit()) { + LOG.warn("{} is a split parent but not in CLOSED or SPLIT state", regionNode); return null; } // As in DisableTableProcedure or ModifyTableProcedure, we will hold the xlock for table, so @@ -1922,6 +1927,14 @@ public void markRegionAsSplit(final RegionInfo parent, final ServerName serverNa nodeB.setState(State.SPLITTING_NEW); TableDescriptor td = master.getTableDescriptors().get(parent.getTable()); + // TODO: here we just update the parent region info in meta, to set split and offline to true, + // without changing the one in the region node. This is a bit confusing but the region info + // field in RegionStateNode is not expected to be changed in the current design. Need to find a + // possible way to address this problem, or at least adding more comments about the trick to + // deal with this problem, that when you want to filter out split parent, you need to check both + // the RegionState on whether it is split, and also the region info. If one of them matches then + // it is a split parent. And usually only one of them can match, as after restart, the region + // state will be changed from SPLIT to CLOSED. 
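+  // For example, a caller that must filter out split parents therefore checks both:
+  //   regionNode.isInState(State.SPLIT) || regionNode.getRegionInfo().isSplit()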
regionStateStore.splitRegion(parent, daughterA, daughterB, serverName, td); if (shouldAssignFavoredNodes(parent)) { List onlineServers = this.master.getServerManager().getOnlineServersList(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java index 3bb3c4c0b358..061b8175886a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java @@ -173,7 +173,7 @@ public void deleteRegions(final List regionInfos) { regionInfos.forEach(this::deleteRegion); } - ArrayList getTableRegionStateNodes(final TableName tableName) { + List getTableRegionStateNodes(final TableName tableName) { final ArrayList regions = new ArrayList(); for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) { if (!node.getTable().equals(tableName)) break; @@ -241,8 +241,10 @@ public boolean hasTableRegionStates(final TableName tableName) { /** * @return Return online regions of table; does not include OFFLINE or SPLITTING regions. */ - public List getRegionsOfTable(final TableName table) { - return getRegionsOfTable(table, false); + public List getRegionsOfTable(TableName table) { + return getRegionsOfTable(table, + regionNode -> !regionNode.isInState(State.OFFLINE, State.SPLIT) && + !regionNode.getRegionInfo().isSplitParent()); } private HRegionLocation createRegionForReopen(RegionStateNode node) { @@ -346,16 +348,22 @@ public HRegionLocation checkReopened(HRegionLocation oldLoc) { } /** - * @return Return online regions of table; does not include OFFLINE or SPLITTING regions. + * Get the regions for enabling a table. + *

    + * Here we want the EnableTableProcedure to be more robust and can be used to fix some nasty + * states, so the checks in this method will be a bit strange. In general, a region can only be + * offline when it is split, for merging we will just delete the parent regions, but with HBCK we + * may force update the state of a region to fix some nasty bugs, so in this method we will try to + * bring the offline regions back if it is not split. That's why we only check for split state + * here. */ - public List getRegionsOfTable(TableName table, boolean offline) { - return getRegionsOfTable(table, state -> include(state, offline)); + public List getRegionsOfTableForEnabling(TableName table) { + return getRegionsOfTable(table, + regionNode -> !regionNode.isInState(State.SPLIT) && !regionNode.getRegionInfo().isSplit()); } /** - * @return Return the regions of the table; does not include OFFLINE unless you set - * offline to true. Does not include regions that are in the - * {@link State#SPLIT} state. + * @return Return the regions of the table and filter them. */ private List getRegionsOfTable(TableName table, Predicate filter) { return getTableRegionStateNodes(table).stream().filter(filter).map(n -> n.getRegionInfo()) @@ -368,7 +376,7 @@ private List getRegionsOfTable(TableName table, Predicatenode (do not include * if split or offline unless offline is set to true. */ - boolean include(final RegionStateNode node, final boolean offline) { + private boolean include(final RegionStateNode node, final boolean offline) { if (LOG.isTraceEnabled()) { LOG.trace("WORKING ON " + node + " " + node.getRegionInfo()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index 1e48981e417c..8b295ec72fc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -97,9 +97,9 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS TableDescriptor tableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); int configuredReplicaCount = tableDescriptor.getRegionReplication(); - // Get regions for the table from memory; get both online and offline regions ('true'). + // Get regions for the table from memory List regionsOfTable = - env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true); + env.getAssignmentManager().getRegionStates().getRegionsOfTableForEnabling(tableName); // How many replicas do we currently have? Check regions returned from // in-memory state. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java index 48cca305700f..b24ec1626849 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java @@ -58,7 +58,7 @@ public class TestRegionStates { protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static ThreadPoolExecutor threadPool; - private static ExecutorCompletionService executorService; + private static ExecutorCompletionService executorService; @BeforeClass public static void setUp() throws Exception { @@ -66,7 +66,7 @@ public static void setUp() throws Exception { new ThreadFactoryBuilder().setNameFormat("ProcedureDispatcher-pool-%d").setDaemon(true) .setUncaughtExceptionHandler((t, e) -> LOG.warn("Failed thread " + t.getName(), e)) .build()); - executorService = new ExecutorCompletionService(threadPool); + executorService = new ExecutorCompletionService<>(threadPool); } @AfterClass @@ -129,13 +129,13 @@ public void testRegionDoubleCreation() throws Exception { checkTableRegions(stateMap, TABLE_NAME_C, NSMALL_RUNS); } - private void checkTableRegions(final RegionStates stateMap, - final TableName tableName, final int nregions) { - List hris = stateMap.getRegionsOfTable(tableName, true); - assertEquals(nregions, hris.size()); - for (int i = 1; i < hris.size(); ++i) { - long a = Bytes.toLong(hris.get(i - 1).getStartKey()); - long b = Bytes.toLong(hris.get(i + 0).getStartKey()); + private void checkTableRegions(final RegionStates stateMap, final TableName tableName, + final int nregions) { + List rns = stateMap.getTableRegionStateNodes(tableName); + assertEquals(nregions, rns.size()); + for (int i = 1; i < rns.size(); ++i) { + long a = Bytes.toLong(rns.get(i - 1).getRegionInfo().getStartKey()); + long b = Bytes.toLong(rns.get(i + 0).getRegionInfo().getStartKey()); assertEquals(b, a + 1); } } @@ -155,11 +155,6 @@ public Object call() { }); } - private Object createRegionNode(final RegionStates stateMap, - final TableName tableName, final long regionId) { - return stateMap.getOrCreateRegionStateNode(createRegionInfo(tableName, regionId)); - } - private RegionInfo createRegionInfo(final TableName tableName, final long regionId) { return RegionInfoBuilder.newBuilder(tableName) .setStartKey(Bytes.toBytes(regionId)) From d8de24c31185cf325ad7fa96df4c455f4c817192 Mon Sep 17 00:00:00 2001 From: sanjeetnishad95 Date: Fri, 23 Oct 2020 06:12:00 +0530 Subject: [PATCH 145/769] HBASE-25128 RSGroupInfo's toString() and hashCode() does not take into account configuration map. 
(#2484) Signed-off-by: Guanghao Zhang --- .../java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java index 1aa7ca1fedd0..bb4a4d7c6228 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java @@ -203,6 +203,9 @@ public String toString() { sb.append(", "); sb.append(" Tables:"); sb.append(this.tables); + sb.append(", "); + sb.append(" Configurations:"); + sb.append(this.configuration); return sb.toString(); } @@ -239,6 +242,7 @@ public int hashCode() { int result = servers.hashCode(); result = 31 * result + tables.hashCode(); result = 31 * result + name.hashCode(); + result = 31 * result + configuration.hashCode(); return result; } }
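A hedged sketch of the contract this patch restores (RSGroupInfo(String) is the public constructor; the setConfiguration(String, String) mutator is assumed from the configuration-map API that this hunk only reads): two groups that differ only in their configuration map should neither collide on hashCode() nor print identically, given that equals() already considers the map.

    RSGroupInfo g1 = new RSGroupInfo("group_a");
    RSGroupInfo g2 = new RSGroupInfo("group_a");
    g2.setConfiguration("some.key", "some.value"); // assumed mutator
    // before the patch: g1.hashCode() == g2.hashCode(), identical toString() output
    // after the patch: both reflect the configuration map, consistent with equals()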
From 5e3ffb1db6e2d1a42295385c294574b8c8eeebbb Mon Sep 17 00:00:00 2001 From: Sandeep Pal Date: Fri, 23 Oct 2020 12:23:36 +0530 Subject: [PATCH 146/769] HBASE-25193: Add support for row prefix and type in the WAL Pretty Printer Closes #2556 Signed-off-by: Wellington Chevreuil Signed-off-by: Bharath Vissapragada Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../region/WALProcedurePrettyPrinter.java | 2 +- .../hadoop/hbase/wal/WALPrettyPrinter.java | 89 ++++++++++++++----- 2 files changed, 70 insertions(+), 21 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java index a4ed7339845d..0e60709b5e09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java @@ -105,7 +105,7 @@ protected int doWork() throws Exception { if (!Bytes.equals(PROC_FAMILY, 0, PROC_FAMILY.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) { // We could have cells other than procedure edits, for example, a flush marker - WALPrettyPrinter.printCell(out, op, false); + WALPrettyPrinter.printCell(out, op, false, false); continue; } long procId = Bytes.toLong(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index a37efec610eb..07bcb1067ffc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -46,7 +46,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - +import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; @@ -85,7 +85,12 @@ public class WALPrettyPrinter { // List of tables for filter private final Set<String> tableSet; private String region; + + // exact row which needs to be filtered private String row; + // prefix of rows which needs to be filtered + private String rowPrefix; + private boolean outputOnlyRowKey; // enable in order to output a single list of transactions from several files private boolean persistentOutput; @@ -107,6 +112,7 @@ public WALPrettyPrinter() { tableSet = new HashSet<>(); region = null; row = null; + rowPrefix = null; outputOnlyRowKey = false; persistentOutput = false; firstTxn = true; @@ -181,6 +187,17 @@ public void setRowFilter(String row) { this.row = row; } + /** + * sets the rowPrefix key prefix by which output will be filtered + * + * @param rowPrefix + * when not null, serves as a filter; only log entries with rows + * having this prefix will be printed + */ + public void setRowPrefixFilter(String rowPrefix) { + this.rowPrefix = rowPrefix; + } + /** * Option to print the row key only in case you just need the row keys from the WAL */ @@ -301,15 +318,12 @@ public void processFile(final Configuration conf, final Path p) List<Map<String, Object>> actions = new ArrayList<>(); for (Cell cell : edit.getCells()) { // add atomic operation to txn - Map<String, Object> op = new HashMap<>(toStringMap(cell, outputOnlyRowKey)); - if (outputValues) { - op.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell))); - } - // check row output filter - if (row == null || ((String) op.get("row")).equals(row)) { - actions.add(op); + Map<String, Object> op = + new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, row, outputValues)); + if (op.isEmpty()) { + continue; } - op.put("total_size_sum", cell.heapSize()); + actions.add(op); } if (actions.isEmpty()) { continue; @@ -326,15 +340,19 @@ public void processFile(final Configuration conf, final Path p) out.print(GSON.toJson(txn)); } else { // Pretty output, complete with indentation by atomic action - out.println(String.format(outputTmpl, + if (!outputOnlyRowKey) { + out.println(String.format(outputTmpl, txn.get("sequence"), txn.get("table"), txn.get("region"), new Date(writeTime))); + } for (int i = 0; i < actions.size(); i++) { Map<String, Object> op = actions.get(i); - printCell(out, op, outputValues); + printCell(out, op, outputValues, outputOnlyRowKey); } } - out.println("edit heap size: " + entry.getEdit().heapSize()); - out.println("position: " + log.getPosition()); + if (!outputOnlyRowKey) { + out.println("edit heap size: " + entry.getEdit().heapSize()); + out.println("position: " + log.getPosition()); + } } } finally { log.close(); @@ -344,9 +362,17 @@ public void processFile(final Configuration conf, final Path p) } } - public static void printCell(PrintStream out, Map<String, Object> op, boolean outputValues) { - out.println("row=" + op.get("row") + ", type=" + op.get("type") + ", column=" + - op.get("family") + ":" + op.get("qualifier")); + public static void printCell(PrintStream out, Map<String, Object> op, + boolean outputValues, boolean outputOnlyRowKey) { + String rowDetails = "row=" + op.get("row"); + if (outputOnlyRowKey) { + out.println(rowDetails); + return; + } + + rowDetails += ", column=" + op.get("family") + ":" + op.get("qualifier"); + rowDetails += ", type=" + op.get("type"); + out.println(rowDetails); if (op.get("tag") != null) { out.println(" tag: " + op.get("tag")); } @@ -356,11 +382,20 @@ public static void printCell(PrintStream out, Map<String, Object> op, boolean ou out.println("cell total size sum: " + op.get("total_size_sum")); } - public static Map<String, Object> toStringMap(Cell cell, boolean printRowKeyOnly) { + public static Map<String, Object> toStringMap(Cell cell, + boolean printRowKeyOnly, String rowPrefix, String row, boolean outputValues) { Map<String, Object> stringMap = new HashMap<>(); - stringMap.put("row", - Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + String rowKey = Bytes.toStringBinary(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength()); + // Row and row prefix are mutually exclusive options so both cannot be true at the + // same time. We can include checks in the same condition + // Check if any of the filters are satisfied by the row, if not return empty map + if ((!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) || + (!Strings.isNullOrEmpty(row) && !rowKey.equals(row))) { + return stringMap; + } + stringMap.put("row", rowKey); if (printRowKeyOnly) { return stringMap; } @@ -372,6 +407,7 @@ public static Map<String, Object> toStringMap(Cell cell, boolean printRowKeyOnly cell.getQualifierLength())); stringMap.put("timestamp", cell.getTimestamp()); stringMap.put("vlen", cell.getValueLength()); + stringMap.put("total_size_sum", cell.heapSize()); if (cell.getTagsLength() > 0) { List<String> tagsString = new ArrayList<>(); Iterator<Tag> tagsIterator = PrivateCellUtil.tagsIterator(cell); @@ -382,11 +418,14 @@ public static Map<String, Object> toStringMap(Cell cell, boolean printRowKeyOnly } stringMap.put("tag", tagsString); } + if (outputValues) { + stringMap.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell))); + } return stringMap; } public static Map<String, Object> toStringMap(Cell cell) { - return toStringMap(cell, false); + return toStringMap(cell, false, null, null, false); } public static void main(String[] args) throws IOException { @@ -417,6 +456,7 @@ public static void run(String[] args) throws IOException { options.addOption("k", "outputOnlyRowKey", false, "Print only row keys"); options.addOption("w", "row", true, "Row to filter by. Pass row name."); + options.addOption("f", "rowPrefix", true, "Row prefix to filter by."); options.addOption("g", "goto", true, "Position to seek to in the file"); WALPrettyPrinter printer = new WALPrettyPrinter(); @@ -450,8 +490,17 @@ public static void run(String[] args) throws IOException { printer.setSequenceFilter(Long.parseLong(cmd.getOptionValue("s"))); } if (cmd.hasOption("w")) { + if (cmd.hasOption("f")) { + throw new ParseException("Row and Row-prefix cannot be supplied together"); + } printer.setRowFilter(cmd.getOptionValue("w")); } + if (cmd.hasOption("f")) { + if (cmd.hasOption("w")) { + throw new ParseException("Row and Row-prefix cannot be supplied together"); + } + printer.setRowPrefixFilter(cmd.getOptionValue("f")); + } if (cmd.hasOption("g")) { printer.setPosition(Long.parseLong(cmd.getOptionValue("g"))); }
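A hedged usage sketch of the new prefix filter (setRowPrefixFilter and processFile are the methods shown above; the configuration, WAL path and prefix are placeholders): every cell whose row key does not start with the prefix is now dropped inside toStringMap, before any value or tag rendering.

    Configuration conf = HBaseConfiguration.create();
    WALPrettyPrinter printer = new WALPrettyPrinter();
    printer.setRowPrefixFilter("order-2020"); // keep only rows with this prefix
    printer.processFile(conf, new Path("/hbase/oldWALs/sample.wal")); // placeholder path

On the command line the same filter is the new -f/--rowPrefix option added above; combining it with -w/--row is rejected with a ParseException whichever order the flags are checked in.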
From 881c92b892844be567d5f26b161a820ebf319f84 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Sat, 24 Oct 2020 23:49:14 +0900 Subject: [PATCH 147/769] HBASE-25206 Data loss can happen if a cloned table loses original split region(delete table) (#2569) Signed-off-by: Duo Zhang --- .../hbase/master/assignment/RegionStates.java | 12 +++++++ .../TransitRegionStateProcedure.java | 1 + .../procedure/DeleteTableProcedure.java | 3 +- ...romClientAfterSplittingRegionTestBase.java | 36 +++++++++++++++++++ 4 files changed, 51 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java index 061b8175886a..06378002ecbd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java @@ -362,6 +362,18 @@ public List<RegionInfo> getRegionsOfTableForEnabling(TableName table) { regionNode -> !regionNode.isInState(State.SPLIT) && !regionNode.getRegionInfo().isSplit()); } + /** + * Get the regions for deleting a table. + * <p/> + * Here we need to return all the regions irrespective of the states in order to archive them + * all. This is because if we don't archive OFFLINE/SPLIT regions and if a snapshot or a cloned + * table references the regions, we will lose the data of the regions. + */ + public List<RegionInfo> getRegionsOfTableForDeleting(TableName table) { + return getTableRegionStateNodes(table).stream().map(RegionStateNode::getRegionInfo) + .collect(Collectors.toList()); + } + /** * @return Return the regions of the table and filter them. */
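Side by side, the two accessors introduced by this pair of patches make the filtering policy explicit at each call site (a hedged sketch; regionStates and tableName are illustrative):

    List<RegionInfo> toAssign = regionStates.getRegionsOfTableForEnabling(tableName); // skips split parents
    List<RegionInfo> toArchive = regionStates.getRegionsOfTableForDeleting(tableName); // keeps every region

DeleteTableProcedure archives the full list, so a snapshot or a cloned table that still references OFFLINE/SPLIT parent regions keeps its data.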
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java index b0a697deaa97..63bb345cffed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java @@ -348,6 +348,7 @@ protected Flow executeFromState(MasterProcedureEnv env, RegionStateTransitionSta LOG.error( "Cannot assign replica region {} because its primary region {} does not exist.", regionNode.getRegionInfo(), defaultRI); + regionNode.unsetProcedure(this); return Flow.NO_MORE_STATE; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 9cfce0ce3632..80dddc7ccda1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -99,7 +99,8 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s // TODO: Move out... in the acquireLock() LOG.debug("Waiting for RIT for {}", this); - regions = env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName()); + regions = env.getAssignmentManager().getRegionStates() + .getRegionsOfTableForDeleting(getTableName()); assert regions != null && !regions.isEmpty() : "unexpected 0 regions"; ProcedureSyncWait.waitRegionInTransition(env, regions); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java index 5ed100f6d296..e8c016777283 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java @@ -80,4 +80,40 @@ public void testCloneSnapshotAfterSplittingRegion() throws IOException, Interrup admin.catalogJanitorSwitch(true); } } + + @Test + public void testCloneSnapshotBeforeSplittingRegionAndDroppingTable() + throws IOException, InterruptedException { + // Turn off the CatalogJanitor + admin.catalogJanitorSwitch(false); + + try { + // Take a snapshot + admin.snapshot(snapshotName2, tableName); + + // Clone the snapshot to another table + TableName clonedTableName = + TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis()); + admin.cloneSnapshot(snapshotName2, clonedTableName); + SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName); + + // Split the first region of the original table + List<RegionInfo> regionInfos = admin.getRegions(tableName); + RegionReplicaUtil.removeNonDefaultRegions(regionInfos); + splitRegion(regionInfos.get(0)); + + // Drop the original table + admin.disableTable(tableName); + admin.deleteTable(tableName); + + // Disable and enable the cloned table.
This should be successful + admin.disableTable(clonedTableName); + admin.enableTable(clonedTableName); + SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName); + + verifyRowCount(TEST_UTIL, clonedTableName, snapshot1Rows); + } finally { + admin.catalogJanitorSwitch(true); + } + } } From e5d4e2fc8138cba0c4a1da2b42b51042da3d9c7e Mon Sep 17 00:00:00 2001 From: Reid Chan Date: Sun, 25 Oct 2020 17:46:14 +0800 Subject: [PATCH 148/769] HBASE-25189 [Metrics] Add checkAndPut and checkAndDelete latency metrics at table level (#2549) Signed-off-by: Viraj Jasani --- .../regionserver/MetricsTableLatencies.java | 25 +++++++++++++ .../MetricsTableLatenciesImpl.java | 36 +++++++++++++++++++ .../regionserver/MetricsRegionServer.java | 15 ++++++-- .../hbase/regionserver/RSRpcServices.java | 9 +++-- .../RegionServerTableMetrics.java | 12 +++++++ .../regionserver/TestMetricsRegionServer.java | 17 ++------- 6 files changed, 94 insertions(+), 20 deletions(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java index 231bad1be879..2aeb82b0d64d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java @@ -53,6 +53,9 @@ public interface MetricsTableLatencies { String DELETE_BATCH_TIME = "deleteBatchTime"; String INCREMENT_TIME = "incrementTime"; String APPEND_TIME = "appendTime"; + String CHECK_AND_DELETE_TIME = "checkAndDeleteTime"; + String CHECK_AND_PUT_TIME = "checkAndPutTime"; + String CHECK_AND_MUTATE_TIME = "checkAndMutateTime"; /** * Update the Put time histogram @@ -125,4 +128,26 @@ public interface MetricsTableLatencies { * @param t time it took */ void updateScanTime(String tableName, long t); + + /** + * Update the CheckAndDelete time histogram. + * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndDelete(String nameAsString, long time); + + /** + * Update the CheckAndPut time histogram. + * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndPut(String nameAsString, long time); + + /** + * Update the CheckAndMutate time histogram. 
+ * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndMutate(String nameAsString, long time); + } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index 5a3f3b9d2491..5e13a614ff0c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -47,6 +47,9 @@ public static class TableHistograms { final MetricHistogram deleteBatchTimeHisto; final MetricHistogram scanTimeHisto; final MetricHistogram scanSizeHisto; + final MetricHistogram checkAndDeleteTimeHisto; + final MetricHistogram checkAndPutTimeHisto; + final MetricHistogram checkAndMutateTimeHisto; TableHistograms(DynamicMetricsRegistry registry, TableName tn) { getTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, GET_TIME)); @@ -60,6 +63,12 @@ public static class TableHistograms { qualifyMetricsName(tn, DELETE_BATCH_TIME)); scanTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, SCAN_TIME)); scanSizeHisto = registry.newSizeHistogram(qualifyMetricsName(tn, SCAN_SIZE)); + checkAndDeleteTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); + checkAndPutTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); + checkAndMutateTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); } public void updatePut(long time) { @@ -97,6 +106,18 @@ public void updateScanSize(long scanSize) { public void updateScanTime(long t) { scanTimeHisto.add(t); } + + public void updateCheckAndDeleteTime(long t) { + checkAndDeleteTimeHisto.add(t); + } + + public void updateCheckAndPutTime(long t) { + checkAndPutTimeHisto.add(t); + } + + public void updateCheckAndMutateTime(long t) { + checkAndMutateTimeHisto.add(t); + } } @VisibleForTesting @@ -174,6 +195,21 @@ public void updateScanTime(String tableName, long t) { getOrCreateTableHistogram(tableName).updateScanTime(t); } + @Override + public void updateCheckAndDelete(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndDeleteTime(time); + } + + @Override + public void updateCheckAndPut(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndPutTime(time); + } + + @Override + public void updateCheckAndMutate(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndMutateTime(time); + } + @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 715da6c47bd8..e37a2722c9f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -145,15 +145,24 @@ public void updateDeleteBatch(TableName tn, long t) { serverSource.updateDeleteBatch(t); } - public void updateCheckAndDelete(long t) { + public void updateCheckAndDelete(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + 
tableMetrics.updateCheckAndDelete(tn, t); + } serverSource.updateCheckAndDelete(t); } - public void updateCheckAndPut(long t) { + public void updateCheckAndPut(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateCheckAndPut(tn, t); + } serverSource.updateCheckAndPut(t); } - public void updateCheckAndMutate(long t) { + public void updateCheckAndMutate(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateCheckAndMutate(tn, t); + } serverSource.updateCheckAndMutate(t); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index a59f5e609b17..d7ba9fc8a289 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3076,15 +3076,18 @@ private CheckAndMutateResult checkAndMutate(HRegion region, OperationQuota quota MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); if (metricsRegionServer != null) { long after = EnvironmentEdgeManager.currentTime(); - metricsRegionServer.updateCheckAndMutate(after - before); + metricsRegionServer.updateCheckAndMutate( + region.getRegionInfo().getTable(), after - before); MutationType type = mutation.getMutateType(); switch (type) { case PUT: - metricsRegionServer.updateCheckAndPut(after - before); + metricsRegionServer.updateCheckAndPut( + region.getRegionInfo().getTable(), after - before); break; case DELETE: - metricsRegionServer.updateCheckAndDelete(after - before); + metricsRegionServer.updateCheckAndDelete( + region.getRegionInfo().getTable(), after - before); break; default: break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java index ec6c0493bb75..812ae45e8840 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java @@ -65,6 +65,18 @@ public void updateDeleteBatch(TableName table, long time) { latencies.updateDeleteBatch(table.getNameAsString(), time); } + public void updateCheckAndDelete(TableName table, long time) { + latencies.updateCheckAndDelete(table.getNameAsString(), time); + } + + public void updateCheckAndPut(TableName table, long time) { + latencies.updateCheckAndPut(table.getNameAsString(), time); + } + + public void updateCheckAndMutate(TableName table, long time) { + latencies.updateCheckAndMutate(table.getNameAsString(), time); + } + public void updateScanTime(TableName table, long time) { latencies.updateScanTime(table.getNameAsString(), time); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java index 574b1e4130c8..e56eb0f20aaa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -151,9 +151,9 @@ public void testSlowCount() { for (int i=0; i < 17; i ++) { rsm.updatePut(null, 17); rsm.updateDelete(null, 17); - rsm.updateCheckAndDelete(17); - rsm.updateCheckAndPut(17); - rsm.updateCheckAndMutate(17); 
+ rsm.updateCheckAndDelete(null, 17); + rsm.updateCheckAndPut(null, 17); + rsm.updateCheckAndMutate(null, 17); } HELPER.assertCounter("appendNumOps", 24, serverSource); @@ -174,17 +174,6 @@ public void testSlowCount() { HELPER.assertCounter("slowPutCount", 16, serverSource); } - String FLUSH_TIME = "flushTime"; - String FLUSH_TIME_DESC = "Histogram for the time in millis for memstore flush"; - String FLUSH_MEMSTORE_SIZE = "flushMemstoreSize"; - String FLUSH_MEMSTORE_SIZE_DESC = "Histogram for number of bytes in the memstore for a flush"; - String FLUSH_FILE_SIZE = "flushFileSize"; - String FLUSH_FILE_SIZE_DESC = "Histogram for number of bytes in the resulting file for a flush"; - String FLUSHED_OUTPUT_BYTES = "flushedOutputBytes"; - String FLUSHED_OUTPUT_BYTES_DESC = "Total number of bytes written from flush"; - String FLUSHED_MEMSTORE_BYTES = "flushedMemstoreBytes"; - String FLUSHED_MEMSTORE_BYTES_DESC = "Total number of bytes of cells in memstore from flush"; - @Test public void testFlush() { rsm.updateFlush(null, 1, 2, 3); From 164cc5a3dc44416ed0d43c4ea1ce18e860856ae0 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 26 Oct 2020 10:03:43 +0800 Subject: [PATCH 149/769] HBASE-25215 TestClientOperationTimeout.testScanTimeout is flaky (#2583) Signed-off-by: Guanghao Zhang --- .../hbase/regionserver/RSRpcServices.java | 2 +- .../hbase/TestClientOperationTimeout.java | 95 +++++++++++-------- 2 files changed, 55 insertions(+), 42 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index d7ba9fc8a289..e15e8e9c1753 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -271,7 +271,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, AdminService.BlockingInterface, ClientService.BlockingInterface, PriorityFunction, ConfigurationObserver { - protected static final Logger LOG = LoggerFactory.getLogger(RSRpcServices.class); + private static final Logger LOG = LoggerFactory.getLogger(RSRpcServices.class); /** RPC scheduler to use for the region server. 
*/ public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java index 10a3cb73db6d..eb62c973a655 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hbase; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; + import java.io.IOException; import java.net.SocketTimeoutException; -import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -27,12 +31,12 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.ipc.CallTimeoutException; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RSRpcServices; @@ -40,12 +44,13 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; @@ -69,6 +74,8 @@ @Category({ ClientTests.class, MediumTests.class }) public class TestClientOperationTimeout { + private static final Logger LOG = LoggerFactory.getLogger(TestClientOperationTimeout.class); + @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestClientOperationTimeout.class); @@ -91,7 +98,7 @@ public class TestClientOperationTimeout { private static Table TABLE; @BeforeClass - public static void setUpClass() throws Exception { + public static void setUp() throws Exception { // Set RegionServer class and use default values for other options. 
StartMiniClusterOption option = StartMiniClusterOption.builder().rsClass(DelayedRegionServer.class).build(); @@ -108,14 +115,6 @@ public static void setUpClass() throws Exception { TABLE = CONN.getTable(TABLE_NAME); } - @Before - public void setUp() throws Exception { - DELAY_GET = 0; - DELAY_SCAN = 0; - DELAY_MUTATE = 0; - DELAY_BATCH_MUTATE = 0; - } - @AfterClass public static void tearDown() throws Exception { Closeables.close(TABLE, true); @@ -123,6 +122,14 @@ public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } + @Before + public void setUpBeforeTest() throws Exception { + DELAY_GET = 0; + DELAY_SCAN = 0; + DELAY_MUTATE = 0; + DELAY_BATCH_MUTATE = 0; + } + /** * Tests that a get on a table throws {@link RetriesExhaustedException} when the operation takes * longer than 'hbase.client.operation.timeout'. @@ -132,10 +139,11 @@ public void testGetTimeout() { DELAY_GET = 600; try { TABLE.get(new Get(ROW)); - Assert.fail("should not reach here"); + fail("should not reach here"); } catch (Exception e) { - Assert.assertTrue( - e instanceof RetriesExhaustedException && e.getCause() instanceof CallTimeoutException); + LOG.info("Got exception for get", e); + assertThat(e, instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause(), instanceOf(CallTimeoutException.class)); } } @@ -150,10 +158,11 @@ public void testPutTimeout() { put.addColumn(FAMILY, QUALIFIER, VALUE); try { TABLE.put(put); - Assert.fail("should not reach here"); + fail("should not reach here"); } catch (Exception e) { - Assert.assertTrue( - e instanceof RetriesExhaustedException && e.getCause() instanceof CallTimeoutException); + LOG.info("Got exception for put", e); + assertThat(e, instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause(), instanceOf(CallTimeoutException.class)); } } @@ -164,20 +173,17 @@ public void testPutTimeout() { @Test public void testMultiPutsTimeout() { DELAY_BATCH_MUTATE = 600; - Put put1 = new Put(ROW); - put1.addColumn(FAMILY, QUALIFIER, VALUE); - Put put2 = new Put(ROW); - put2.addColumn(FAMILY, QUALIFIER, VALUE); - List<Put> puts = new ArrayList<>(); - puts.add(put1); - puts.add(put2); + Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + Put put2 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + List<Put> puts = Arrays.asList(put1, put2); try { TABLE.batch(puts, new Object[2]); - Assert.fail("should not reach here"); + fail("should not reach here"); } catch (Exception e) { - Assert.assertTrue( - e instanceof RetriesExhaustedException && e.getCause() instanceof RetriesExhaustedException - && e.getCause().getCause() instanceof CallTimeoutException); + LOG.info("Got exception for batch", e); + assertThat(e, instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause(), instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause().getCause(), instanceOf(CallTimeoutException.class)); } } @@ -186,19 +192,26 @@ public void testMultiPutsTimeout() { * longer than 'hbase.client.scanner.timeout.period'. */ @Test - public void testScanTimeout() { + public void testScanTimeout() throws IOException, InterruptedException { + // cache the region location. + try (RegionLocator locator = TABLE.getRegionLocator()) { + locator.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY); + } + // sleep a bit to make sure the location has been cached as it is an async operation.
+ Thread.sleep(100); DELAY_SCAN = 600; - try { - ResultScanner scanner = TABLE.getScanner(new Scan()); + try (ResultScanner scanner = TABLE.getScanner(new Scan())) { scanner.next(); - Assert.fail("should not reach here"); + fail("should not reach here"); } catch (Exception e) { - Assert.assertTrue( - e instanceof RetriesExhaustedException && e.getCause() instanceof TimeoutIOException); + LOG.info("Got exception for scan", e); + assertThat(e, instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause(), instanceOf(CallTimeoutException.class)); } } - private static class DelayedRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer { + public static final class DelayedRegionServer + extends MiniHBaseCluster.MiniHBaseClusterRegionServer { public DelayedRegionServer(Configuration conf) throws IOException, InterruptedException { super(conf); } @@ -212,14 +225,14 @@ protected RSRpcServices createRpcServices() throws IOException { /** * This {@link RSRpcServices} class injects delay for Rpc calls and after executes super methods. */ - public static class DelayedRSRpcServices extends RSRpcServices { + private static final class DelayedRSRpcServices extends RSRpcServices { DelayedRSRpcServices(HRegionServer rs) throws IOException { super(rs); } @Override public ClientProtos.GetResponse get(RpcController controller, ClientProtos.GetRequest request) - throws ServiceException { + throws ServiceException { try { Thread.sleep(DELAY_GET); } catch (InterruptedException e) { @@ -230,7 +243,7 @@ public ClientProtos.GetResponse get(RpcController controller, ClientProtos.GetRe @Override public ClientProtos.MutateResponse mutate(RpcController rpcc, - ClientProtos.MutateRequest request) throws ServiceException { + ClientProtos.MutateRequest request) throws ServiceException { try { Thread.sleep(DELAY_MUTATE); } catch (InterruptedException e) { @@ -241,7 +254,7 @@ public ClientProtos.MutateResponse mutate(RpcController rpcc, @Override public ClientProtos.ScanResponse scan(RpcController controller, - ClientProtos.ScanRequest request) throws ServiceException { + ClientProtos.ScanRequest request) throws ServiceException { try { Thread.sleep(DELAY_SCAN); } catch (InterruptedException e) { @@ -252,7 +265,7 @@ public ClientProtos.ScanResponse scan(RpcController controller, @Override public ClientProtos.MultiResponse multi(RpcController rpcc, ClientProtos.MultiRequest request) - throws ServiceException { + throws ServiceException { try { Thread.sleep(DELAY_BATCH_MUTATE); } catch (InterruptedException e) { From 4bbc772ffcb7039a0f5de6bf09eaaceecd09ccdb Mon Sep 17 00:00:00 2001 From: WenFeiYi Date: Tue, 27 Oct 2020 22:01:57 +0800 Subject: [PATCH 150/769] HBASE-25173 Remove owner related methods in TableDescriptor/TableDescriptorBuilder (#2541) Signed-off-by: Duo Zhang --- .../hadoop/hbase/client/TableDescriptor.java | 7 --- .../hbase/client/TableDescriptorBuilder.java | 59 ------------------- .../hbase/coprocessor/TestSecureExport.java | 14 ++--- .../security/access/AccessController.java | 8 +-- .../SnapshotScannerHDFSAclController.java | 3 +- .../hadoop/hbase/HBaseTestingUtility.java | 26 +++++++- .../hbase/client/SnapshotWithAclTestBase.java | 13 ++-- .../hbase/rsgroup/TestRSGroupsWithACL.java | 6 +- .../hbase/security/access/SecureTestUtil.java | 12 ++++ .../security/access/TestAccessController.java | 34 +++++------ .../access/TestAccessController3.java | 8 ++- .../TestCellACLWithMultipleVersions.java | 13 ++-- .../hbase/security/access/TestCellACLs.java | 10 ++-- 
.../security/access/TestHDFSAclHelper.java | 29 +++++---- .../access/TestScanEarlyTermination.java | 9 +-- .../access/TestWithDisabledAuthorization.java | 10 ++-- hbase-shell/src/main/ruby/hbase/admin.rb | 3 +- .../src/main/ruby/shell/commands/alter.rb | 2 +- .../src/main/ruby/shell/commands/create.rb | 2 +- hbase-shell/src/test/ruby/hbase/admin_test.rb | 7 +-- 20 files changed, 123 insertions(+), 152 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java index a4523872c9c5..1440c28787d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java @@ -177,13 +177,6 @@ public interface TableDescriptor { */ TableName getTableName(); - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - String getOwnerString(); - /** * Get the region server group this table belongs to. The regions of this table will be placed * only on the region servers within this group. If not present, will be placed on diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 1328f7d017e2..c611a217960d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -71,12 +70,6 @@ public class TableDescriptorBuilder { private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE)); - @InterfaceAudience.Private - public static final String OWNER = "OWNER"; - @InterfaceAudience.Private - public static final Bytes OWNER_KEY - = new Bytes(Bytes.toBytes(OWNER)); - /** * Used by rest interface to access this metadata attribute * which denotes if the table is Read Only. @@ -485,26 +478,6 @@ public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) { return this; } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - public TableDescriptorBuilder setOwner(User owner) { - desc.setOwner(owner); - return this; - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - public TableDescriptorBuilder setOwnerString(String ownerString) { - desc.setOwnerString(ownerString); - return this; - } - public TableDescriptorBuilder setPriority(int priority) { desc.setPriority(priority); return this; @@ -1550,38 +1523,6 @@ public void removeCoprocessor(String className) { } } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - public ModifyableTableDescriptor setOwner(User owner) { - return setOwnerString(owner != null ? owner.getShortName() : null); - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - // used by admin.rb:alter(table_name,*args) to update owner. 
- @Deprecated - public ModifyableTableDescriptor setOwnerString(String ownerString) { - return setValue(OWNER_KEY, ownerString); - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Override - @Deprecated - public String getOwnerString() { - // Note that every table should have an owner (i.e. should have OWNER_KEY set). - // hbase:meta should return system user as owner, not null (see - // MasterFileSystem.java:bootstrap()). - return getOrDefault(OWNER_KEY, Function.identity(), null); - } - /** * @return the bytes in pb format */ diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index 2f5024737dbc..d3be45b56f68 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -216,6 +216,7 @@ public static void beforeClass() throws Exception { Permission.Action.EXEC, Permission.Action.READ, Permission.Action.WRITE); + SecureTestUtil.grantGlobal(UTIL, USER_OWNER, Permission.Action.CREATE); addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER), Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); } @@ -236,11 +237,11 @@ public static void afterClass() throws Exception { public void testAccessCase() throws Throwable { final String exportTable = name.getMethodName(); TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) + .newBuilder(TableName.valueOf(exportTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); + User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); SecureTestUtil.grantOnTable(UTIL, USER_RO, TableName.valueOf(exportTable), null, null, Permission.Action.READ); @@ -340,9 +341,9 @@ public void testVisibilityLabels() throws IOException, Throwable { final TableDescriptor exportHtd = TableDescriptorBuilder .newBuilder(TableName.valueOf(exportTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); + User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); AccessTestAction putAction = () -> { Put p1 = new Put(ROW1); p1.addColumn(FAMILYA, QUAL, NOW, QUAL); @@ -398,9 +399,8 @@ public void testVisibilityLabels() throws IOException, Throwable { final TableDescriptor importHtd = TableDescriptorBuilder .newBuilder(TableName.valueOf(importTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, importHtd, new byte[][]{Bytes.toBytes("s")}); + SecureTestUtil.createTable(UTIL, owner, importHtd, new byte[][]{Bytes.toBytes("s")}); AccessTestAction importAction = () -> { String[] args = new String[]{ "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 3779903f869a..3a6c3aae657b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -804,10 +804,7 @@ public void postCompletedCreateTableAction( + PermissionStorage.ACL_TABLE_NAME + " is not yet created. " + getClass().getSimpleName() + " should be configured as the first Coprocessor"); } else { - String owner = desc.getOwnerString(); - // default the table owner to current user, if not specified. - if (owner == null) - owner = getActiveUser(c).getShortName(); + String owner = getActiveUser(c).getShortName(); final UserPermission userPermission = new UserPermission(owner, Permission.newBuilder(desc.getTableName()).withActions(Action.values()).build()); // switch to the real hbase master user for doing the RPC on the ACL table @@ -906,8 +903,7 @@ public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, Tab TableDescriptor oldDesc, TableDescriptor currentDesc) throws IOException { final Configuration conf = c.getEnvironment().getConfiguration(); // default the table owner to current user, if not specified. - final String owner = (currentDesc.getOwnerString() != null) ? currentDesc.getOwnerString() : - getActiveUser(c).getShortName(); + final String owner = getActiveUser(c).getShortName(); User.runAsLoginUser(new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception {
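The replacement pattern for table ownership, as a hedged sketch assembled from the test changes in the rest of this patch (grantGlobal, grantOnTable and the owner-aware createTable overload are the SecureTestUtil helpers used below; user and table names are illustrative): ownership stops being a descriptor attribute and becomes an explicit ACL grant plus creating the table as that user.

    User owner = User.createUserForTesting(conf, "owner", new String[0]);
    SecureTestUtil.grantGlobal(TEST_UTIL, owner.getShortName(), Permission.Action.CREATE);
    SecureTestUtil.createTable(TEST_UTIL, owner, tableDescriptor); // created as 'owner'
    // transferring "ownership" later is likewise an explicit grant:
    SecureTestUtil.grantOnTable(TEST_UTIL, "new_owner", tableName, null, null, Permission.Action.values());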
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java index 5c4ba0d68505..e52134e7d065 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java @@ -179,8 +179,7 @@ public void postCompletedCreateTableAction(ObserverContext perms = admin.getUserPermissions(GetUserPermissionsRequest.newBuilder(tableName).build()); @@ -1724,13 +1726,9 @@ public void testPermissionList() throws Exception { assertFalse("User should not be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); - // disable table before modification - admin.disableTable(tableName); User newOwner = User.createUserForTesting(conf, "new_owner", new String[] {}); - tableDescriptor = - TableDescriptorBuilder.newBuilder(tableDescriptor).setOwner(newOwner).build(); - admin.modifyTable(tableDescriptor); + grantOnTable(TEST_UTIL, newOwner.getShortName(), tableName, + null, null, Permission.Action.values()); perms = admin.getUserPermissions(GetUserPermissionsRequest.newBuilder(tableName).build()); UserPermission newOwnerperm = new UserPermission(newOwner.getName(), @@ -1758,7 +1756,7 @@ public void testGlobalPermissionList() throws Exception { new UserPermission(user, Permission.newBuilder().withActions(Action.values()).build())); } assertTrue("Only super users, global users and user admin has permission on table hbase:acl " + - "per setup", perms.size() == 5 + superUsers.size() && + "per setup", perms.size() == 6 + superUsers.size() && hasFoundUserPermission(adminPerms, perms)); } @@ -2278,8 +2276,8 @@ private void createTestTable(TableName tname) throws Exception { private void createTestTable(TableName tname, byte[] cf) throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tname) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cf).setMaxVersions(100).build()) - .setOwner(USER_OWNER).build(); - createTable(TEST_UTIL, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); + .build(); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); } @Test @@ -2858,7 +2856,7 @@ public void testAccessControllerUserPermsRegexHandling() throws Exception { // Verify that we can read sys-tables String aclTableName = PermissionStorage.ACL_TABLE_NAME.getNameAsString(); - assertEquals(5, SUPERUSER.runAs(getPrivilegedAction(aclTableName)).size()); + assertEquals(6, SUPERUSER.runAs(getPrivilegedAction(aclTableName)).size()); assertEquals(0, testRegexHandler.runAs(getPrivilegedAction(aclTableName)).size()); // Grant TABLE ADMIN privs to testUserPerms @@ -3517,10 +3515,10 @@ public Object run() throws Exception { // Validate global user permission List<UserPermission> userPermissions; - assertEquals(5 + superUserCount, AccessControlClient.getUserPermissions(conn, null).size()); - assertEquals(5 + superUserCount, + assertEquals(6 + superUserCount, AccessControlClient.getUserPermissions(conn, null).size()); + assertEquals(6 + superUserCount, AccessControlClient.getUserPermissions(conn, HConstants.EMPTY_STRING).size()); - assertEquals(5 + superUserCount, + assertEquals(6 + superUserCount, AccessControlClient.getUserPermissions(conn, null, HConstants.EMPTY_STRING).size()); userPermissions = AccessControlClient.getUserPermissions(conn, null, USER_ADMIN.getName()); verifyGetUserPermissionResult(userPermissions, 1, null, null, USER_ADMIN.getName(), superUsers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java index 53e07ff8101e..8860d5f06587 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java @@ -189,6 +189,9 @@ public static void setupBeforeClass() throws Exception { USER_GROUP_WRITE = User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE }); + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Permission.Action.CREATE); + systemUserConnection = TEST_UTIL.getConnection(); setUpTableAndUserPermissions(); } @@ -207,9 +210,8 @@ public static void tearDownAfterClass() throws Exception { private static void setUpTableAndUserPermissions() throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()) - .setOwner(USER_OWNER).build(); - createTable(TEST_UTIL, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build(); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0); RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java index 1d3a54e13f6d..f3035a96cc4a 100644 ---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -125,6 +124,9 @@ public static void setupBeforeClass() throws Exception { GROUP_USER = User.createUserForTesting(conf, "group_user", new String[] { GROUP }); usersAndGroups = new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }; + + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Action.CREATE); } @AfterClass @@ -138,14 +140,9 @@ public void setUp() throws Exception { .setColumnFamily( ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY1).setMaxVersions(4).build()) .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY2).setMaxVersions(4).build()) - .setOwner(USER_OWNER).build(); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY2).setMaxVersions(4).build()).build(); // Create the test table (owner added to the _acl_ table) - try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - try (Admin admin = connection.getAdmin()) { - admin.createTable(tableDescriptor, new byte[][] { Bytes.toBytes("s") }); - } - } + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitTableEnabled(testTable.getTableName()); LOG.info("Sleeping a second because of HBASE-12581"); Threads.sleep(1000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java index 1515e1410a59..6d238284cdd4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -127,6 +126,9 @@ public static void setupBeforeClass() throws Exception { GROUP_USER = User.createUserForTesting(conf, "group_user", new String[] { GROUP }); usersAndGroups = new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }; + + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Action.CREATE); } @AfterClass @@ -137,12 +139,10 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { // Create the test table (owner added to the _acl_ table) - Admin admin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()) .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(4).build()) - .setOwner(USER_OWNER).build(); - 
admin.createTable(tableDescriptor, new byte[][] { Bytes.toBytes("s") }); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(4).build()).build(); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitTableEnabled(testTable.getTableName()); LOG.info("Sleeping a second because of HBASE-12581"); Threads.sleep(1000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java index 420fb977bf4f..e4e37e08ba7c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java @@ -39,6 +39,8 @@ final class TestHDFSAclHelper { private static final Logger LOG = LoggerFactory.getLogger(TestHDFSAclHelper.class); + private static final String USER_OWNER = "owner"; + private TestHDFSAclHelper() { } @@ -55,33 +57,41 @@ static void createNamespace(HBaseTestingUtility util, String namespace) throws I } } - static Table createTable(HBaseTestingUtility util, TableName tableName) throws IOException { + static Table createTable(HBaseTestingUtility util, TableName tableName) throws Exception { createNamespace(util, tableName.getNamespaceAsString()); TableDescriptor td = getTableDescriptorBuilder(util, tableName) .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true").build(); byte[][] splits = new byte[][] { Bytes.toBytes("2"), Bytes.toBytes("4") }; - return util.createTable(td, splits); + User user = User.createUserForTesting(util.getConfiguration(), USER_OWNER, new String[] {}); + SecureTestUtil.grantGlobal(util, user.getShortName(), Permission.Action.CREATE); + SecureTestUtil.createTable(util, user, td, splits); + return util.getConnection().getTable(tableName); } - static Table createMobTable(HBaseTestingUtility util, TableName tableName) throws IOException { + static Table createMobTable(HBaseTestingUtility util, TableName tableName) throws Exception { createNamespace(util, tableName.getNamespaceAsString()); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN1).setMobEnabled(true) .setMobThreshold(0).build()) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN2).setMobEnabled(true) .setMobThreshold(0).build()) - .setOwner(User.createUserForTesting(util.getConfiguration(), "owner", new String[] {})) .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true").build(); byte[][] splits = new byte[][] { Bytes.toBytes("2"), Bytes.toBytes("4") }; - return util.createTable(td, splits); + User user = User.createUserForTesting(util.getConfiguration(), USER_OWNER, new String[] {}); + SecureTestUtil.grantGlobal(util, user.getShortName(), Permission.Action.CREATE); + SecureTestUtil.createTable(util, user, td, splits); + return util.getConnection().getTable(tableName); } static TableDescriptor createUserScanSnapshotDisabledTable(HBaseTestingUtility util, - TableName tableName) throws IOException { + TableName tableName) throws Exception { createNamespace(util, tableName.getNamespaceAsString()); TableDescriptor td = getTableDescriptorBuilder(util, tableName).build(); byte[][] splits = new byte[][] { Bytes.toBytes("2"), Bytes.toBytes("4") }; - try (Table t = util.createTable(td, splits)) { + User user = User.createUserForTesting(util.getConfiguration(), USER_OWNER, new 
String[] {}); + SecureTestUtil.grantGlobal(util, user.getShortName(), Permission.Action.CREATE); + SecureTestUtil.createTable(util, user, td, splits); + try (Table t = util.getConnection().getTable(tableName)) { put(t); } return td; @@ -91,11 +101,10 @@ static TableDescriptorBuilder getTableDescriptorBuilder(HBaseTestingUtility util TableName tableName) { return TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN1).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN2).build()) - .setOwner(User.createUserForTesting(util.getConfiguration(), "owner", new String[] {})); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN2).build()); } - static void createTableAndPut(HBaseTestingUtility util, TableName tableNam) throws IOException { + static void createTableAndPut(HBaseTestingUtility util, TableName tableNam) throws Exception { try (Table t = createTable(util, tableNam)) { put(t); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java index f8ac4f696aee..aade90ca6157 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -107,6 +106,9 @@ public static void setupBeforeClass() throws Exception { // create a set of test users USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]); USER_OTHER = User.createUserForTesting(conf, "other", new String[0]); + + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Action.CREATE); } @AfterClass @@ -116,9 +118,8 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { - Admin admin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(testTable.getTableName()).setOwner(USER_OWNER) + TableDescriptorBuilder.newBuilder(testTable.getTableName()) .setColumnFamily( ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY1).setMaxVersions(10).build()) .setColumnFamily( @@ -127,7 +128,7 @@ public void setUp() throws Exception { // want to confirm that the per-table configuration is properly picked up. 
.setValue(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, "true").build(); - admin.createTable(tableDescriptor); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor); TEST_UTIL.waitUntilAllRegionsAssigned(testTable.getTableName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java index 47458f3dd372..a08456a8917a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; @@ -167,6 +166,9 @@ public static void setupBeforeClass() throws Exception { USER_RO = User.createUserForTesting(conf, "rouser", new String[0]); USER_QUAL = User.createUserForTesting(conf, "rwpartial", new String[0]); USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]); + + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Action.CREATE); } @AfterClass @@ -177,12 +179,10 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { // Create the test table (owner added to the _acl_ table) - Admin admin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()) .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()) - .setOwner(USER_OWNER).build(); - admin.createTable(tableDescriptor, new byte[][] { Bytes.toBytes("s") }); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build(); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitUntilAllRegionsAssigned(testTable.getTableName()); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(testTable.getTableName()).get(0); diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index a91b273033c9..d3492fa7c1c5 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1470,8 +1470,8 @@ def list_locks end # Parse arguments and update TableDescriptorBuilder accordingly + # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity def update_tdb_from_arg(tdb, arg) - tdb.setOwnerString(arg.delete(TableDescriptorBuilder::OWNER)) if arg.include?(TableDescriptorBuilder::OWNER) tdb.setMaxFileSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MAX_FILESIZE))) if arg.include?(TableDescriptorBuilder::MAX_FILESIZE) tdb.setReadOnly(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::READONLY))) if arg.include?(TableDescriptorBuilder::READONLY) tdb.setCompactionEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::COMPACTION_ENABLED))) if arg.include?(TableDescriptorBuilder::COMPACTION_ENABLED) @@ -1490,6 +1490,7 @@ def update_tdb_from_arg(tdb, arg) set_user_metadata(tdb, arg.delete(METADATA)) if arg[METADATA] set_descriptor_config(tdb, arg.delete(CONFIGURATION)) 
if arg[CONFIGURATION] end + # rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity #---------------------------------------------------------------------------------------------- # clear compaction queues diff --git a/hbase-shell/src/main/ruby/shell/commands/alter.rb b/hbase-shell/src/main/ruby/shell/commands/alter.rb index 456d6d5dbd12..22e6e42e69c3 100644 --- a/hbase-shell/src/main/ruby/shell/commands/alter.rb +++ b/hbase-shell/src/main/ruby/shell/commands/alter.rb @@ -95,7 +95,7 @@ def help hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 }, { MAX_FILESIZE => '134217728' }, { METHOD => 'delete', NAME => 'f2' }, - OWNER => 'johndoe', METADATA => { 'mykey' => 'myvalue' } + METADATA => { 'mykey' => 'myvalue' } EOF end diff --git a/hbase-shell/src/main/ruby/shell/commands/create.rb b/hbase-shell/src/main/ruby/shell/commands/create.rb index b82b2bfc346d..897e8a744b9f 100644 --- a/hbase-shell/src/main/ruby/shell/commands/create.rb +++ b/hbase-shell/src/main/ruby/shell/commands/create.rb @@ -45,7 +45,7 @@ def help hbase> create 'ns1:t1', 'f1', SPLITS => ['10', '20', '30', '40'] hbase> create 't1', 'f1', SPLITS => ['10', '20', '30', '40'] - hbase> create 't1', 'f1', SPLITS_FILE => 'splits.txt', OWNER => 'johndoe' + hbase> create 't1', 'f1', SPLITS_FILE => 'splits.txt' hbase> create 't1', {NAME => 'f1', VERSIONS => 5}, METADATA => { 'mykey' => 'myvalue' } hbase> # Optionally pre-split the table into NUMREGIONS, using hbase> # SPLITALGO ("HexStringSplit", "UniformSplit" or classname) diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index fac52ede51b7..64a4a8b425c6 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -426,7 +426,7 @@ def teardown define_test "create should fail without columns when called with options" do drop_test_table(@create_test_name) assert_raise(ArgumentError) do - command(:create, @create_test_name, { OWNER => 'a' }) + command(:create, @create_test_name, { VERSIONS => '1' }) end end @@ -460,7 +460,6 @@ def teardown define_test "create should be able to set table options" do drop_test_table(@create_test_name) command(:create, @create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, - OWNER => '987654321', PRIORITY => '77', FLUSH_POLICY => 'org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy', REGION_MEMSTORE_REPLICATION => 'TRUE', @@ -470,7 +469,6 @@ def teardown MERGE_ENABLED => 'false') assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) assert_match(/12345678/, admin.describe(@create_test_name)) - assert_match(/987654321/, admin.describe(@create_test_name)) assert_match(/77/, admin.describe(@create_test_name)) assert_match(/'COMPACTION_ENABLED' => 'false'/, admin.describe(@create_test_name)) assert_match(/'SPLIT_ENABLED' => 'false'/, admin.describe(@create_test_name)) @@ -484,9 +482,8 @@ def teardown define_test "create should ignore table_att" do drop_test_table(@create_test_name) - command(:create, @create_test_name, 'a', 'b', METHOD => 'table_att', OWNER => '987654321') + command(:create, @create_test_name, 'a', 'b', METHOD => 'table_att') assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) - assert_match(/987654321/, admin.describe(@create_test_name)) end define_test "create should work with SPLITALGO" do From 485da758bb36adb7b42a8e47fb915dcb7da6c626 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Wed, 28 Oct 2020 14:52:27 +0800 Subject: [PATCH 151/769] 
HBASE-25176 MasterStoppedException should be moved to hbase-client module (#2538) Signed-off-by: Duo Zhang --- .../exceptions/MasterStoppedException.java | 34 +++++++++++++++++++ .../apache/hadoop/hbase/master/HMaster.java | 7 +--- 2 files changed, 35 insertions(+), 6 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java new file mode 100644 index 000000000000..1ed5b55410ff --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.exceptions; + +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Thrown when the master is stopped + */ +@InterfaceAudience.Private +public class MasterStoppedException extends DoNotRetryIOException { + + private static final long serialVersionUID = -4284604435898100365L; + + public MasterStoppedException() { + super(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 575feae4c75f..86d3983c5677 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.exceptions.MasterStoppedException; import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.http.InfoServer; @@ -2853,12 +2854,6 @@ protected void checkServiceStarted() throws ServerNotRunningYetException { } } - public static class MasterStoppedException extends DoNotRetryIOException { - MasterStoppedException() { - super(); - } - } - void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException, MasterNotRunningException, MasterStoppedException { checkServiceStarted(); From 17f9aded2c8f1a9d25e32bd888b9ca5c6598a535 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Wed, 28 Oct 2020 14:58:28 +0800 Subject: [PATCH 152/769] HBASE-25201 YouAreDeadException should be moved to hbase-server module (#2581) Signed-off-by: Duo Zhang --- .../java/org/apache/hadoop/hbase/YouAreDeadException.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename 
{hbase-client => hbase-server}/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java (90%) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java similarity index 90% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java index 0c44b9a2cc42..6ba719a4acb1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java @@ -24,9 +24,9 @@ import org.apache.yetus.audience.InterfaceStability; /** - * This exception is thrown by the master when a region server reports and is - * already being processed as dead. This can happen when a region server loses - * its session but didn't figure it yet. + * This exception is thrown by the master when a region server reports and is already being + * processed as dead. This can happen when a region server loses its session but didn't figure it + * yet. */ @SuppressWarnings("serial") @InterfaceAudience.Private From 735689d0f7d346f09f5de0a999e340c776ed82d6 Mon Sep 17 00:00:00 2001 From: Minji Kim Date: Thu, 29 Oct 2020 01:17:31 +0900 Subject: [PATCH 153/769] HBASE-25223 Use try-with-resources statement (#2592) Signed-off-by: Wei-Chiu Chuang Signed-off-by: Viraj Jasani Signed-off-by: Duo Zhang Signed-off-by: stack --- .../hadoop/hbase/snapshot/CreateSnapshot.java | 23 +++++----------- .../snapshot/SnapshotDescriptionUtils.java | 26 +++++-------------- .../hbase/snapshot/SnapshotManifest.java | 11 ++------ .../hbase/snapshot/SnapshotManifestV2.java | 17 ++++-------- 4 files changed, 19 insertions(+), 58 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java index d0fc80337f85..f8e54c9c459c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java @@ -67,24 +67,13 @@ protected void processOptions(CommandLine cmd) { @Override protected int doWork() throws Exception { - Connection connection = null; - Admin admin = null; - try { - connection = ConnectionFactory.createConnection(getConf()); - admin = connection.getAdmin(); - admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); + try (Connection connection = ConnectionFactory.createConnection(getConf()); + Admin admin = connection.getAdmin()) { + admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); } catch (Exception e) { - System.err.println("failed to take the snapshot: " + e.getMessage()); - return -1; - } finally { - if (admin != null) { - admin.close(); - } - if (connection != null) { - connection.close(); - } + System.err.println("failed to take the snapshot: " + e.getMessage()); + return -1; } - return 0; + return 0; } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index b54eab1372a3..c059792ca68e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -343,13 +343,8 @@ 
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingD FsPermission perms = CommonFSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY); Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); - try { - FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true); - try { - snapshot.writeTo(out); - } finally { - out.close(); - } + try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)){ + snapshot.writeTo(out); } catch (IOException e) { // if we get an exception, try to remove the snapshot info if (!fs.delete(snapshotInfo, false)) { @@ -370,15 +365,8 @@ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingD public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir) throws CorruptedSnapshotException { Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE); - try { - FSDataInputStream in = null; - try { - in = fs.open(snapshotInfo); - SnapshotDescription desc = SnapshotDescription.parseFrom(in); - return desc; - } finally { - if (in != null) in.close(); - } + try (FSDataInputStream in = fs.open(snapshotInfo)){ + return SnapshotDescription.parseFrom(in); } catch (IOException e) { throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e); } @@ -434,10 +422,8 @@ public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDes } public static boolean isSecurityAvailable(Configuration conf) throws IOException { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - try (Admin admin = conn.getAdmin()) { - return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); - } + try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { + return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 61bf192eb894..9df33e131327 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -582,11 +582,8 @@ private void convertToV2SingleManifest() throws IOException { */ private void writeDataManifest(final SnapshotDataManifest manifest) throws IOException { - FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME)); - try { + try (FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { manifest.writeTo(stream); - } finally { - stream.close(); } } @@ -594,9 +591,7 @@ private void writeDataManifest(final SnapshotDataManifest manifest) * Read the SnapshotDataManifest file */ private SnapshotDataManifest readDataManifest() throws IOException { - FSDataInputStream in = null; - try { - in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME)); + try (FSDataInputStream in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME))) { CodedInputStream cin = CodedInputStream.newInstance(in); cin.setSizeLimit(manifestSizeLimit); return SnapshotDataManifest.parseFrom(cin); @@ -604,8 +599,6 @@ private SnapshotDataManifest readDataManifest() throws IOException { return null; } catch (InvalidProtocolBufferException e) { throw new CorruptedSnapshotException("unable to parse data manifest " + e.getMessage(), e); - } finally { - if (in != null) in.close(); } } 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index 4f3df2fddc90..ae914f69b5cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -93,12 +93,9 @@ public void regionClose(final SnapshotRegionManifest.Builder region) throws IOEx FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf); if (workingDirFs.exists(snapshotDir)) { SnapshotRegionManifest manifest = region.build(); - FSDataOutputStream stream = workingDirFs.create( - getRegionManifestPath(snapshotDir, manifest)); - try { + try (FSDataOutputStream stream = workingDirFs.create( + getRegionManifestPath(snapshotDir, manifest))) { manifest.writeTo(stream); - } finally { - stream.close(); } } else { LOG.warn("can't write manifest without parent dir, maybe it has been deleted by master?"); @@ -157,14 +154,10 @@ public boolean accept(Path path) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { - FSDataInputStream stream = fs.open(st.getPath()); - CodedInputStream cin = CodedInputStream.newInstance(stream); - cin.setSizeLimit(manifestSizeLimit); - - try { + try (FSDataInputStream stream = fs.open(st.getPath())) { + CodedInputStream cin = CodedInputStream.newInstance(stream); + cin.setSizeLimit(manifestSizeLimit); return SnapshotRegionManifest.parseFrom(cin); - } finally { - stream.close(); } } }); From bb4a9d335f3eb9cc48d757f97d94aa388c8203fb Mon Sep 17 00:00:00 2001 From: Peter Somogyi Date: Wed, 28 Oct 2020 18:08:05 +0100 Subject: [PATCH 154/769] HBASE-25224 Maximize sleep for checking meta and namespace regions availability (#2593) Signed-off-by: Michael Stack --- .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 86d3983c5677..58a805334f36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1253,7 +1253,7 @@ private boolean isRegionOnline(RegionInfo ri) { ri.getRegionNameAsString(), rs, optProc.isPresent()); // Check once-a-minute. if (rc == null) { - rc = new RetryCounterFactory(1000).create(); + rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create(); } Threads.sleep(rc.getBackoffTimeAndIncrementAttempts()); } From 259fe1984ae52ab2dd869a765dff518cadcef7f3 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 29 Oct 2020 09:21:18 -0700 Subject: [PATCH 155/769] HBASE-24845 Git/Jira Release Audit: limit branches when building audit db (#2238) Populating the audit database with release tag information from git is time consuming. Until that's sorted out, give the user a flag for limiting which branches they want to be reviewed. 
Signed-off-by: Andrew Purtell --- dev-support/git-jira-release-audit/README.md | 9 +++++++-- .../git-jira-release-audit/git_jira_release_audit.py | 11 ++++++++++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/dev-support/git-jira-release-audit/README.md b/dev-support/git-jira-release-audit/README.md index 396128ad55df..6ea575e16fd3 100644 --- a/dev-support/git-jira-release-audit/README.md +++ b/dev-support/git-jira-release-audit/README.md @@ -62,6 +62,7 @@ usage: git_jira_release_audit.py [-h] [--populate-from-git POPULATE_FROM_GIT] [--release-line-regexp RELEASE_LINE_REGEXP] [--parse-release-tags PARSE_RELEASE_TAGS] [--fallback-actions-path FALLBACK_ACTIONS_PATH] + [--branch-filter-regexp BRANCH_FILTER_REGEXP] [--jira-url JIRA_URL] --branch-1-fix-version BRANCH_1_FIX_VERSION --branch-2-fix-version BRANCH_2_FIX_VERSION @@ -119,6 +120,9 @@ Interactions with the Git repo: --fallback-actions-path FALLBACK_ACTIONS_PATH Path to a file containing _DB.Actions applicable to specific git shas. (default: fallback_actions.csv) + --branch-filter-regexp BRANCH_FILTER_REGEXP + Limit repo parsing to branch names that match this + filter expression. (default: .*) --branch-1-fix-version BRANCH_1_FIX_VERSION The Jira fixVersion used to indicate an issue is committed to the specified release line branch @@ -175,8 +179,9 @@ fetch from Jira 100%|███████████████████ Optionally, the database can be build to include release tags, by specifying `--parse-release-tags=true`. This is more time-consuming, but is necessary for -auditing discrepancies between git and Jira. Running the same command but -including this flag looks like this: +auditing discrepancies between git and Jira. Optionally, limit the branches +under consideration by specifying a regex filter with `--branch-filter-regexp`. +Running the same command but including this flag looks like this: ```shell script origin/branch-1 100%|███████████████████████████████████████| 4084/4084 [08:58<00:00, 7.59 commit/s] diff --git a/dev-support/git-jira-release-audit/git_jira_release_audit.py b/dev-support/git-jira-release-audit/git_jira_release_audit.py index db2788d081d0..358dfd533502 100644 --- a/dev-support/git-jira-release-audit/git_jira_release_audit.py +++ b/dev-support/git-jira-release-audit/git_jira_release_audit.py @@ -199,13 +199,14 @@ class _RepoReader: _identify_amend_jira_id_pattern = re.compile(r'^amend (.+)', re.IGNORECASE) def __init__(self, db, fallback_actions_path, remote_name, development_branch, - release_line_regexp, parse_release_tags, **_kwargs): + release_line_regexp, branch_filter_regexp, parse_release_tags, **_kwargs): self._db = db self._repo = _RepoReader._open_repo() self._fallback_actions = _RepoReader._load_fallback_actions(fallback_actions_path) self._remote_name = remote_name self._development_branch = development_branch self._release_line_regexp = release_line_regexp + self._branch_filter_regexp = branch_filter_regexp self._parse_release_tags = parse_release_tags @property @@ -364,6 +365,10 @@ def populate_db_release_branch(self, origin_commit, release_branch): release_branch (str): The name of the ref whose history is to be parsed. 
""" global MANAGER + branch_filter_pattern = re.compile('%s/%s' % (self._remote_name, self._branch_filter_regexp)) + if not branch_filter_pattern.match(release_branch): + return + commits = list(self._repo.iter_commits( "%s...%s" % (origin_commit.hexsha, release_branch), reverse=True)) LOG.info("%s has %d commits since its origin at %s.", release_branch, len(commits), @@ -638,6 +643,10 @@ def _build_first_pass_parser(): '--fallback-actions-path', help='Path to a file containing _DB.Actions applicable to specific git shas.', default='fallback_actions.csv') + git_repo_group.add_argument( + '--branch-filter-regexp', + help='Limit repo parsing to branch names that match this filter expression.', + default=r'.*') jira_group = parser.add_argument_group('Interactions with Jira') jira_group.add_argument( '--jira-url', From 35b344c337c9c7dfd48f59de9bb5cc768c7bed78 Mon Sep 17 00:00:00 2001 From: GeorryHuang <215175212@qq.com> Date: Fri, 30 Oct 2020 04:09:18 +0800 Subject: [PATCH 156/769] HBASE-25090 CompactionConfiguration logs unrealistic store file sizes (#2595) Signed-off-by: stack --- .../regionserver/compactions/CompactionConfiguration.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java index dbc5b1fea1b1..75966b9e7467 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java @@ -180,7 +180,9 @@ public class CompactionConfiguration { @Override public String toString() { return String.format( - "size [%s, %s, %s); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;" + "size [minCompactSize:%s, maxCompactSize:%s, offPeakMaxCompactSize:%s);" + + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" + + " ratio %f; off-peak ratio %f; throttle point %d;" + " major period %d, major jitter %f, min locality to compact %f;" + " tiered compaction: max_age %d, incoming window min %d," + " compaction policy for tiered window %s, single output for minor %b," From 12d039701577479f41783a706420bb9c974d444f Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Wed, 30 Sep 2020 16:48:01 -0700 Subject: [PATCH 157/769] HBASE-24419 Normalizer merge plans should consider more than 2 regions when possible The core change here is to the loop in `SimpleRegionNormalizer#computeMergeNormalizationPlans`. It's a nested loop that walks the table's region chain once, looking for contiguous sequences of regions that meet the criteria for merge. The outer loop tracks the starting point of the next sequence, the inner loop looks for the end of that sequence. A single sequence becomes an instance of `MergeNormalizationPlan`. 
Signed-off-by: Huaxiang Sun --- .../apache/hadoop/hbase/MatcherPredicate.java | 65 +++++++ .../normalizer/MergeNormalizationPlan.java | 6 + .../normalizer/NormalizationTarget.java | 3 +- .../normalizer/SimpleRegionNormalizer.java | 82 ++++++--- .../TestSimpleRegionNormalizer.java | 64 ++++++- .../TestSimpleRegionNormalizerOnCluster.java | 167 +++++++++++------- 6 files changed, 287 insertions(+), 100 deletions(-) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java new file mode 100644 index 000000000000..695c026992ac --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.function.Supplier; +import org.apache.yetus.audience.InterfaceAudience; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.StringDescription; + +/** + * An implementation of {@link Waiter.ExplainingPredicate} that uses Hamcrest {@link Matcher} for + * both predicate evaluation and explanation. + * + * @param The type of value to be evaluated via {@link Matcher}. 
+ */ +@InterfaceAudience.Private +public class MatcherPredicate implements Waiter.ExplainingPredicate { + + private final String reason; + private final Supplier supplier; + private final Matcher matcher; + private T currentValue; + + public MatcherPredicate(final Supplier supplier, final Matcher matcher) { + this("", supplier, matcher); + } + + public MatcherPredicate(final String reason, final Supplier supplier, + final Matcher matcher) { + this.reason = reason; + this.supplier = supplier; + this.matcher = matcher; + this.currentValue = null; + } + + @Override public boolean evaluate() { + currentValue = supplier.get(); + return matcher.matches(currentValue); + } + + @Override public String explainFailure() { + final Description description = new StringDescription() + .appendText(reason) + .appendText("\nExpected: ").appendDescriptionOf(matcher) + .appendText("\n but: "); + matcher.describeMismatch(currentValue, description); + return description.toString(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java index 677b9ec8052e..f5a72863fe8a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java @@ -96,6 +96,12 @@ static class Builder { private final List normalizationTargets = new LinkedList<>(); + public Builder setTargets(final List targets) { + normalizationTargets.clear(); + normalizationTargets.addAll(targets); + return this; + } + public Builder addTarget(final RegionInfo regionInfo, final long regionSizeMb) { normalizationTargets.add(new NormalizationTarget(regionInfo, regionSizeMb)); return this; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java index 9e4b3f426403..95490288cef9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import java.util.Objects; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; @@ -33,7 +34,7 @@ class NormalizationTarget { private final long regionSizeMb; NormalizationTarget(final RegionInfo regionInfo, final long regionSizeMb) { - this.regionInfo = regionInfo; + this.regionInfo = Objects.requireNonNull(regionInfo); this.regionSizeMb = regionSizeMb; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index a641a0aa25b7..062e401ba812 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import static org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils.isEmpty; import java.io.IOException; import java.time.Instant; import java.time.Period; 
import java.util.ArrayList; import java.util.Collections; +import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.function.BooleanSupplier; @@ -41,7 +43,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** * Simple implementation of region normalizer. Logic in use: @@ -77,7 +78,7 @@ class SimpleRegionNormalizer implements RegionNormalizer { private boolean mergeEnabled; private int minRegionCount; private Period mergeMinRegionAge; - private int mergeMinRegionSizeMb; + private long mergeMinRegionSizeMb; public SimpleRegionNormalizer() { splitEnabled = DEFAULT_SPLIT_ENABLED; @@ -124,10 +125,10 @@ private static Period parseMergeMinRegionAge(final Configuration conf) { return Period.ofDays(settledValue); } - private static int parseMergeMinRegionSizeMb(final Configuration conf) { - final int parsedValue = - conf.getInt(MERGE_MIN_REGION_SIZE_MB_KEY, DEFAULT_MERGE_MIN_REGION_SIZE_MB); - final int settledValue = Math.max(0, parsedValue); + private static long parseMergeMinRegionSizeMb(final Configuration conf) { + final long parsedValue = + conf.getLong(MERGE_MIN_REGION_SIZE_MB_KEY, DEFAULT_MERGE_MIN_REGION_SIZE_MB); + final long settledValue = Math.max(0, parsedValue); if (parsedValue != settledValue) { warnInvalidValue(MERGE_MIN_REGION_SIZE_MB_KEY, parsedValue, settledValue); } @@ -171,7 +172,7 @@ public Period getMergeMinRegionAge() { /** * Return this instance's configured value for {@value #MERGE_MIN_REGION_SIZE_MB_KEY}. */ - public int getMergeMinRegionSizeMb() { + public long getMergeMinRegionSizeMb() { return mergeMinRegionSizeMb; } @@ -198,7 +199,7 @@ public List computePlansForTable(final TableName table) { } final NormalizeContext ctx = new NormalizeContext(table); - if (CollectionUtils.isEmpty(ctx.getTableRegions())) { + if (isEmpty(ctx.getTableRegions())) { return Collections.emptyList(); } @@ -251,7 +252,7 @@ private boolean proceedWithMergePlanning() { * Also make sure tableRegions contains regions of the same table */ private double getAverageRegionSizeMb(final List tableRegions) { - if (CollectionUtils.isEmpty(tableRegions)) { + if (isEmpty(tableRegions)) { throw new IllegalStateException( "Cannot calculate average size of a table without any regions."); } @@ -315,35 +316,60 @@ private boolean skipForMerge(final RegionStates regionStates, final RegionInfo r * towards target average or target region count. */ private List computeMergeNormalizationPlans(final NormalizeContext ctx) { - if (ctx.getTableRegions().size() < minRegionCount) { + if (isEmpty(ctx.getTableRegions()) || ctx.getTableRegions().size() < minRegionCount) { LOG.debug("Table {} has {} regions, required min number of regions for normalizer to run" + " is {}, not computing merge plans.", ctx.getTableName(), ctx.getTableRegions().size(), minRegionCount); return Collections.emptyList(); } - final double avgRegionSizeMb = ctx.getAverageRegionSizeMb(); + final long avgRegionSizeMb = (long) ctx.getAverageRegionSizeMb(); + if (avgRegionSizeMb < mergeMinRegionSizeMb) { + return Collections.emptyList(); + } LOG.debug("Computing normalization plan for table {}. 
average region size: {}, number of" + " regions: {}.", ctx.getTableName(), avgRegionSizeMb, ctx.getTableRegions().size()); - final List plans = new ArrayList<>(); - for (int candidateIdx = 0; candidateIdx < ctx.getTableRegions().size() - 1; candidateIdx++) { - final RegionInfo current = ctx.getTableRegions().get(candidateIdx); - final RegionInfo next = ctx.getTableRegions().get(candidateIdx + 1); - if (skipForMerge(ctx.getRegionStates(), current) - || skipForMerge(ctx.getRegionStates(), next)) { - continue; + // this nested loop walks the table's region chain once, looking for contiguous sequences of + // regions that meet the criteria for merge. The outer loop tracks the starting point of the + // next sequence, the inner loop looks for the end of that sequence. A single sequence becomes + // an instance of MergeNormalizationPlan. + + final List plans = new LinkedList<>(); + final List rangeMembers = new LinkedList<>(); + long sumRangeMembersSizeMb; + int current = 0; + for (int rangeStart = 0; + rangeStart < ctx.getTableRegions().size() - 1 && current < ctx.getTableRegions().size();) { + // walk the region chain looking for contiguous sequences of regions that can be merged. + rangeMembers.clear(); + sumRangeMembersSizeMb = 0; + for (current = rangeStart; current < ctx.getTableRegions().size(); current++) { + final RegionInfo regionInfo = ctx.getTableRegions().get(current); + final long regionSizeMb = getRegionSizeMB(regionInfo); + if (skipForMerge(ctx.getRegionStates(), regionInfo)) { + // this region cannot participate in a range. resume the outer loop. + rangeStart = Math.max(current, rangeStart + 1); + break; + } + if (rangeMembers.isEmpty() // when there are no range members, seed the range with whatever + // we have. this way we're prepared in case the next region is + // 0-size. + || regionSizeMb == 0 // always add an empty region to the current range. + || (regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb)) { // add the current region + // to the range when + // there's capacity + // remaining. + rangeMembers.add(new NormalizationTarget(regionInfo, regionSizeMb)); + sumRangeMembersSizeMb += regionSizeMb; + continue; + } + // we have accumulated enough regions to fill a range. resume the outer loop. + rangeStart = Math.max(current, rangeStart + 1); + break; } - final long currentSizeMb = getRegionSizeMB(current); - final long nextSizeMb = getRegionSizeMB(next); - // always merge away empty regions when they present themselves. 
- if (currentSizeMb == 0 || nextSizeMb == 0 || currentSizeMb + nextSizeMb < avgRegionSizeMb) { - final MergeNormalizationPlan plan = new MergeNormalizationPlan.Builder() - .addTarget(current, currentSizeMb) - .addTarget(next, nextSizeMb) - .build(); - plans.add(plan); - candidateIdx++; + if (rangeMembers.size() > 1) { + plans.add(new MergeNormalizationPlan.Builder().setTargets(rangeMembers).build()); } } return plans; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index f263cbc4fdfd..33b32972542e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -33,6 +33,7 @@ import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.RETURNS_DEEP_STUBS; import static org.mockito.Mockito.when; @@ -225,7 +226,7 @@ public void testSplitOfLargeRegion() { } @Test - public void testSplitWithTargetRegionSize() throws Exception { + public void testWithTargetRegionSize() throws Exception { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 6); final Map regionSizes = @@ -251,8 +252,6 @@ public void testSplitWithTargetRegionSize() throws Exception { new MergeNormalizationPlan.Builder() .addTarget(regionInfos.get(0), 20) .addTarget(regionInfos.get(1), 40) - .build(), - new MergeNormalizationPlan.Builder() .addTarget(regionInfos.get(2), 60) .addTarget(regionInfos.get(3), 80) .build())); @@ -392,7 +391,7 @@ public void testHonorsMergeMinRegionSize() { } @Test - public void testMergeEmptyRegions() { + public void testMergeEmptyRegions0() { conf.setBoolean(SPLIT_ENABLED_KEY, false); conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); final TableName tableName = name.getTableName(); @@ -418,6 +417,63 @@ public void testMergeEmptyRegions() { .build())); } + @Test + public void testMergeEmptyRegions1() { + conf.setBoolean(SPLIT_ENABLED_KEY, false); + conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); + final TableName tableName = name.getTableName(); + final List regionInfos = createRegionInfos(tableName, 8); + final Map regionSizes = + createRegionSizesMap(regionInfos, 0, 1, 10, 0, 9, 0, 10, 0); + setupMocksForNormalizer(regionSizes, regionInfos); + + assertFalse(normalizer.isSplitEnabled()); + assertEquals(0, normalizer.getMergeMinRegionSizeMb()); + assertThat(normalizer.computePlansForTable(tableName), contains( + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 1) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(2), 10) + .addTarget(regionInfos.get(3), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(4), 9) + .addTarget(regionInfos.get(5), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(6), 10) + .addTarget(regionInfos.get(7), 0) + .build())); + } + + @Test + public void testSplitAndMultiMerge() { + conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); + final TableName tableName = name.getTableName(); + final List regionInfos = createRegionInfos(tableName, 8); + final Map regionSizes = + 
createRegionSizesMap(regionInfos, 3, 1, 1, 30, 9, 3, 1, 0); + setupMocksForNormalizer(regionSizes, regionInfos); + + assertTrue(normalizer.isMergeEnabled()); + assertTrue(normalizer.isSplitEnabled()); + assertEquals(0, normalizer.getMergeMinRegionSizeMb()); + assertThat(normalizer.computePlansForTable(tableName), contains( + new SplitNormalizationPlan(regionInfos.get(3), 30), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 3) + .addTarget(regionInfos.get(1), 1) + .addTarget(regionInfos.get(2), 1) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(5), 3) + .addTarget(regionInfos.get(6), 1) + .addTarget(regionInfos.get(7), 0) + .build())); + } + // This test is to make sure that normalizer is only going to merge adjacent regions. @Test public void testNormalizerCannotMergeNonAdjacentRegions() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java index f5feb59ca329..3cc9168b4343 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java @@ -17,11 +17,16 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -29,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MatcherPredicate; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ServerName; @@ -55,6 +61,8 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.LoadTestKVGenerator; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -144,7 +152,7 @@ public void testHonorsNormalizerTableSetting() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); - waitForTableSplit(tn1, tn1RegionCount + 1); + waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1)); // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because normalizer has not been enabled on it. 
@@ -161,7 +169,7 @@ public void testHonorsNormalizerTableSetting() throws Exception { tn2RegionCount, getRegionCount(tn2)); LOG.debug("waiting for t3 to settle..."); - waitForTableRegionCount(tn3, tn3RegionCount); + waitForTableRegionCount(tn3, comparesEqualTo(tn3RegionCount)); } finally { dropIfExists(tn1); dropIfExists(tn2); @@ -198,7 +206,7 @@ void testRegionNormalizationSplit(boolean limitedByQuota) throws Exception { currentRegionCount, getRegionCount(tableName)); } else { - waitForTableSplit(tableName, currentRegionCount + 1); + waitForTableRegionCount(tableName, greaterThanOrEqualTo(currentRegionCount + 1)); assertEquals( tableName + " should have split.", currentRegionCount + 1, @@ -216,7 +224,7 @@ public void testRegionNormalizationMerge() throws Exception { final int currentRegionCount = createTableBegsMerge(tableName); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); - waitForTableMerge(tableName, currentRegionCount - 1); + waitForTableRegionCount(tableName, lessThanOrEqualTo(currentRegionCount - 1)); assertEquals( tableName + " should have merged.", currentRegionCount - 1, @@ -242,7 +250,7 @@ public void testHonorsNamespaceFilter() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); - waitForTableSplit(tn1, tn1RegionCount + 1); + waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1)); // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because it's not a member of the target namespace. @@ -250,7 +258,7 @@ public void testHonorsNamespaceFilter() throws Exception { tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); - waitForTableRegionCount(tn2, tn2RegionCount); + waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); dropIfExists(tn2); @@ -271,7 +279,7 @@ public void testHonorsPatternFilter() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); - waitForTableSplit(tn1, tn1RegionCount + 1); + waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1)); // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because it fails filter. @@ -279,7 +287,7 @@ public void testHonorsPatternFilter() throws Exception { tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); - waitForTableRegionCount(tn2, tn2RegionCount); + waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); dropIfExists(tn2); @@ -300,7 +308,7 @@ public void testHonorsNameFilter() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); - waitForTableSplit(tn1, tn1RegionCount + 1); + waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1)); // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn3RegionCount number of regions because it fails filter: @@ -308,13 +316,33 @@ public void testHonorsNameFilter() throws Exception { tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); - waitForTableRegionCount(tn2, tn2RegionCount); + waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); dropIfExists(tn2); } } + /** + * A test for when a region is the target of both a split and a merge plan. Does not define + * expected behavior, only that some change is applied to the table. 
+ */ + @Test + public void testTargetOfSplitAndMerge() throws Exception { + final TableName tn = TableName.valueOf(name.getMethodName()); + try { + final int tnRegionCount = createTableTargetOfSplitAndMerge(tn); + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize().get()); + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( + "expected " + tn + " to split or merge (probably split)", + () -> getRegionCountUnchecked(tn), + not(comparesEqualTo(tnRegionCount)))); + } finally { + dropIfExists(tn); + } + } + private static TableName buildTableNameForQuotaTest(final String methodName) throws Exception { String nsp = "np2"; NamespaceDescriptor nspDesc = @@ -326,74 +354,30 @@ private static TableName buildTableNameForQuotaTest(final String methodName) thr } private static void waitForSkippedSplits(final HMaster master, - final long existingSkippedSplitCount) throws Exception { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override public String explainFailure() { - return "waiting to observe split attempt and skipped."; - } - @Override public boolean evaluate() { - final long skippedSplitCount = master.getRegionNormalizerManager() - .getSkippedCount(PlanType.SPLIT); - return skippedSplitCount > existingSkippedSplitCount; - } - }); + final long existingSkippedSplitCount) { + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( + "waiting to observe split attempt and skipped.", + () -> master.getRegionNormalizerManager().getSkippedCount(PlanType.SPLIT), + Matchers.greaterThan(existingSkippedSplitCount))); } private static void waitForTableRegionCount(final TableName tableName, - final int targetRegionCount) throws IOException { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override - public String explainFailure() { - return "expected " + targetRegionCount + " number of regions for table " + tableName; - } - - @Override - public boolean evaluate() throws IOException { - final int currentRegionCount = getRegionCount(tableName); - return currentRegionCount == targetRegionCount; - } - }); - } - - private static void waitForTableSplit(final TableName tableName, final int targetRegionCount) - throws IOException { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override - public String explainFailure() { - return "expected normalizer to split region."; - } - - @Override - public boolean evaluate() throws IOException { - final int currentRegionCount = getRegionCount(tableName); - return currentRegionCount >= targetRegionCount; - } - }); - } - - private static void waitForTableMerge(final TableName tableName, final int targetRegionCount) - throws IOException { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override - public String explainFailure() { - return "expected normalizer to merge regions."; - } - - @Override - public boolean evaluate() throws IOException { - final int currentRegionCount = getRegionCount(tableName); - return currentRegionCount <= targetRegionCount; - } - }); + Matcher matcher) { + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( + "region count for table " + tableName + " does not match expected", + () -> getRegionCountUnchecked(tableName), + matcher)); } private static List generateTestData(final TableName tableName, final int... 
regionSizesMb) throws IOException { final List generatedRegions; final int numRegions = regionSizesMb.length; + LOG.debug("generating test data into {}, {} regions of sizes (mb) {}", tableName, numRegions, + regionSizesMb); try (Table ignored = TEST_UTIL.createMultiRegionTable(tableName, FAMILY_NAME, numRegions)) { // Need to get sorted list of regions here - generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName); + generatedRegions = new ArrayList<>(TEST_UTIL.getHBaseCluster().getRegions(tableName)); generatedRegions.sort(Comparator.comparing(HRegion::getRegionInfo, RegionInfo.COMPARATOR)); assertEquals(numRegions, generatedRegions.size()); for (int i = 0; i < numRegions; i++) { @@ -407,6 +391,7 @@ private static List generateTestData(final TableName tableName, private static void generateTestData(Region region, int numRows) throws IOException { // generating 1Mb values + LOG.debug("writing {}mb to {}", numRows, region); LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(1024 * 1024, 1024 * 1024); for (int i = 0; i < numRows; ++i) { byte[] key = Bytes.add(region.getRegionInfo().getStartKey(), Bytes.toBytes(i)); @@ -513,6 +498,46 @@ private static int createTableBegsMerge(final TableName tableName) throws Except return 5; } + /** + * Create a table with 4 regions, having region sizes so as to provoke a split of the largest + * region and a merge of an empty region into the largest. + *
<ul>
+ *   <li>total table size: 14</li>
+ *   <li>average region size: 3.5</li>
+ * </ul>
    + */ + private static int createTableTargetOfSplitAndMerge(final TableName tableName) throws Exception { + final int[] regionSizesMb = { 10, 0, 2, 2 }; + final List generatedRegions = generateTestData(tableName, regionSizesMb); + assertEquals(4, getRegionCount(tableName)); + admin.flush(tableName).get(); + + final TableDescriptor td = TableDescriptorBuilder + .newBuilder(admin.getDescriptor(tableName).get()) + .setNormalizationEnabled(true) + .build(); + admin.modifyTable(td).get(); + + // make sure relatively accurate region statistics are available for the test table. use + // the last/largest region as clue. + LOG.debug("waiting for region statistics to settle."); + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { + @Override public String explainFailure() { + return "expected largest region to be >= 10mb."; + } + @Override public boolean evaluate() { + for (int i = 0; i < generatedRegions.size(); i++) { + final RegionInfo regionInfo = generatedRegions.get(i).getRegionInfo(); + if (!(getRegionSizeMB(master, regionInfo) >= regionSizesMb[i])) { + return false; + } + } + return true; + } + }); + return 4; + } + private static void dropIfExists(final TableName tableName) throws Exception { if (tableName != null && admin.tableExists(tableName).get()) { if (admin.isTableEnabled(tableName).get()) { @@ -527,4 +552,12 @@ private static int getRegionCount(TableName tableName) throws IOException { return locator.getAllRegionLocations().size(); } } + + private static int getRegionCountUnchecked(final TableName tableName) { + try { + return getRegionCount(tableName); + } catch (IOException e) { + throw new RuntimeException(e); + } + } } From eee1cf7ff63117a0ee18eecb34b4938b5b8cff55 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 16 Apr 2020 09:21:52 -0700 Subject: [PATCH 158/769] HBASE-24200 Upgrade to Yetus 0.12.0 Signed-off-by: Sean Busbey Signed-off-by: Duo Zhang --- dev-support/Jenkinsfile | 2 +- dev-support/Jenkinsfile_GitHub | 2 +- dev-support/create-release/do-release.sh | 4 ++-- dev-support/create-release/hbase-rm/Dockerfile | 2 +- dev-support/jenkins_precommit_jira_yetus.sh | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 9f23a58873cd..f3de8edffcbe 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -31,7 +31,7 @@ pipeline { disableConcurrentBuilds() } environment { - YETUS_RELEASE = '0.11.1' + YETUS_RELEASE = '0.12.0' // where we'll write everything from different steps. Need a copy here so the final step can check for success/failure. OUTPUT_DIR_RELATIVE_GENERAL = 'output-general' OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7' diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index a15ee9e84957..d25386717739 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -37,7 +37,7 @@ pipeline { DOCKERFILE_REL = "${SRC_REL}/dev-support/docker/Dockerfile" YETUS_DRIVER_REL = "${SRC_REL}/dev-support/jenkins_precommit_github_yetus.sh" // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION = 'rel/0.11.1' + YETUS_VERSION = 'rel/0.12.0' GENERAL_CHECK_PLUGINS = 'all,-compile,-javac,-javadoc,-jira,-shadedjars,-unit' JDK_SPECIFIC_PLUGINS = 'compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars,unit' // output from surefire; sadly the archive function in yetus only works on file names. 
diff --git a/dev-support/create-release/do-release.sh b/dev-support/create-release/do-release.sh index ebab9335cc27..9500801c247b 100755 --- a/dev-support/create-release/do-release.sh +++ b/dev-support/create-release/do-release.sh @@ -118,9 +118,9 @@ function should_build { if should_build "tag" && [ "$SKIP_TAG" = 0 ]; then if [ -z "${YETUS_HOME}" ] && [ "${RUNNING_IN_DOCKER}" != "1" ]; then - declare local_yetus="/opt/apache-yetus/0.11.1/" + declare local_yetus="/opt/apache-yetus/0.12.0/" if [ "$(get_host_os)" = "DARWIN" ]; then - local_yetus="/usr/local/Cellar/yetus/0.11.1/" + local_yetus="/usr/local/Cellar/yetus/0.12.0/" fi YETUS_HOME="$(read_config "YETUS_HOME not defined. Absolute path to local install of Apache Yetus" "${local_yetus}")" export YETUS_HOME diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile index 630b8f17332e..2c29974cfbba 100644 --- a/dev-support/create-release/hbase-rm/Dockerfile +++ b/dev-support/create-release/hbase-rm/Dockerfile @@ -44,7 +44,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && pip install \ python-dateutil==2.8.1 # Install Apache Yetus -ENV YETUS_VERSION 0.11.1 +ENV YETUS_VERSION 0.12.0 SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN wget -qO- "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ tar xvz -C /opt diff --git a/dev-support/jenkins_precommit_jira_yetus.sh b/dev-support/jenkins_precommit_jira_yetus.sh index 9961c3c98cfc..d721ac8faba6 100755 --- a/dev-support/jenkins_precommit_jira_yetus.sh +++ b/dev-support/jenkins_precommit_jira_yetus.sh @@ -32,7 +32,7 @@ export MAVEN_HOME=/home/jenkins/tools/maven/apache-maven-3.0.5 #export PATH=$PATH:${JAVA_HOME}/bin:${MAVEN_HOME}/bin: export PATH=$PATH:${MAVEN_HOME}/bin: -YETUS_RELEASE=0.11.1 +YETUS_RELEASE=0.12.0 COMPONENT=${WORKSPACE}/component TEST_FRAMEWORK=${WORKSPACE}/test_framework From e3beccf1fcd3dcb1384afd7a98fd2ada0e4e2bc0 Mon Sep 17 00:00:00 2001 From: Pankaj Date: Fri, 30 Oct 2020 05:04:23 +0530 Subject: [PATCH 159/769] HBASE-24977 Meta table shouldn't be modified as read only (#2537) Signed-off-by: stack --- .../hbase/util/TableDescriptorChecker.java | 6 ++++ .../hadoop/hbase/TestHBaseMetaEdit.java | 29 +++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index 906ae454d6a8..c69d38a8ec25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -150,6 +151,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) warnOrThrowExceptionForFailure(logWarn, message, null); } + // Meta table shouldn't be set as read only, otherwise it will impact region assignments + if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) { + warnOrThrowExceptionForFailure(false, "Meta table can't be set as 
read only.", null); + } + for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) { if (hcd.getTimeToLive() <= 0) { String message = "TTL for column family " + hcd.getNameAsString() + " must be positive."; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java index 6977452724d3..33c0f1041b00 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -28,6 +29,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -133,4 +135,31 @@ public void testEditMeta() throws IOException { assertTrue(hioe.getMessage().contains("Delete of hbase:meta")); } } + + /** + * Validate whether meta table can be altered as READ only, shouldn't be allowed otherwise it will + * break assignment functionalities. See HBASE-24977. + */ + @Test + public void testAlterMetaWithReadOnly() throws IOException { + Admin admin = UTIL.getAdmin(); + TableDescriptor origMetaTableDesc = admin.getDescriptor(TableName.META_TABLE_NAME); + assertFalse(origMetaTableDesc.isReadOnly()); + TableDescriptor newTD = + TableDescriptorBuilder.newBuilder(origMetaTableDesc).setReadOnly(true).build(); + try { + admin.modifyTable(newTD); + fail("Meta table can't be set as read only"); + } catch (Exception e) { + assertFalse(admin.getDescriptor(TableName.META_TABLE_NAME).isReadOnly()); + } + + // Create a table to check region assignment & meta operation + TableName tableName = TableName.valueOf("tempTable"); + TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName).setReadOnly(true) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()) + .build(); + UTIL.getAdmin().createTable(td); + UTIL.deleteTable(tableName); + } } From d790bdeddef755751c43f328daaf5aa027bf8cad Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 30 Oct 2020 10:41:56 -0700 Subject: [PATCH 160/769] HBASE-25167 Normalizer support for hot config reloading (#2523) Wire up the `ConfigurationObserver` chain for `RegionNormalizerManager`. The following configuration keys support hot-reloading: * hbase.normalizer.throughput.max_bytes_per_sec * hbase.normalizer.split.enabled * hbase.normalizer.merge.enabled * hbase.normalizer.min.region.count * hbase.normalizer.merge.min_region_age.days * hbase.normalizer.merge.min_region_size.mb Note that support for `hbase.normalizer.period` is not provided here. Support would need to be implemented generally for the `Chore` subsystem. 
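For context on the observer chain being wired up here: a component opts into hot reloading by implementing ConfigurationObserver, caching the values it cares about, and registering with the ConfigurationManager. A minimal sketch under those assumptions follows; the MergeSwitch class and the chosen key are illustrative, while the interface and manager are the real types this patch touches.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;

// Illustrative observer; not part of this patch.
class MergeSwitch implements ConfigurationObserver {
  private volatile boolean mergeEnabled;

  // Register via an init method rather than the constructor so that 'this'
  // does not escape during construction (see the ConfigurationManager javadoc).
  void initialize(Configuration conf, ConfigurationManager manager) {
    onConfigurationChange(conf);
    manager.registerObserver(this);
  }

  @Override
  public void onConfigurationChange(Configuration conf) {
    // Invoked by ConfigurationManager.notifyAllObservers on a reload; cache
    // the new value so hot paths never re-parse the Configuration.
    mergeEnabled = conf.getBoolean("hbase.normalizer.merge.enabled", true);
  }

  boolean isMergeEnabled() {
    return mergeEnabled;
  }
}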
Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani Signed-off-by: Aman Poonia --- .../hbase/conf/ConfigurationManager.java | 27 +-- .../hbase/conf/ConfigurationObserver.java | 4 +- .../hbase/conf/TestConfigurationManager.java | 11 +- .../apache/hadoop/hbase/master/HMaster.java | 1 + .../normalizer/RegionNormalizerManager.java | 26 ++- .../normalizer/RegionNormalizerWorker.java | 41 ++++- .../normalizer/SimpleRegionNormalizer.java | 159 ++++++++++++++---- ...ormalizerManagerConfigurationObserver.java | 110 ++++++++++++ 8 files changed, 319 insertions(+), 60 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java index 511679f5b547..2c36c5308fa3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,28 +21,29 @@ import java.util.Set; import java.util.WeakHashMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Maintains the set of all the classes which would like to get notified * when the Configuration is reloaded from the disk in the Online Configuration * Change mechanism, which lets you update certain configuration properties * on-the-fly, without having to restart the cluster. - * + *
<p> * If a class has configuration properties which you would like to be able to * change on-the-fly, do the following: - * 1. Implement the {@link ConfigurationObserver} interface. This would require + * <ol> + * <li>Implement the {@link ConfigurationObserver} interface. This would require * you to implement the * {@link ConfigurationObserver#onConfigurationChange(Configuration)} * method. This is a callback that is used to notify your class' instance * that the configuration has changed. In this method, you need to check * if the new values for the properties that are of interest to your class * are different from the cached values. If yes, update them. - * + *
      * However, be careful with this. Certain properties might be trivially * mutable online, but others might not. Two properties might be trivially * mutable by themselves, but not when changed together. For example, if a @@ -51,21 +52,23 @@ * yet updated "b", it might make a decision on the basis of a new value of * "a", and an old value of "b". This might introduce subtle bugs. This * needs to be dealt on a case-by-case basis, and this class does not provide - * any protection from such cases. + * any protection from such cases.
</li> * - * 2. Register the appropriate instance of the class with the + * <li>Register the appropriate instance of the class with the * {@link ConfigurationManager} instance, using the * {@link ConfigurationManager#registerObserver(ConfigurationObserver)} * method. Be careful not to do this in the constructor, as you might cause * the 'this' reference to escape. Use a factory method, or an initialize() - * method which is called after the construction of the object. + * method which is called after the construction of the object.</li>
* - * 3. Deregister the instance using the + * <li>Deregister the instance using the * {@link ConfigurationManager#deregisterObserver(ConfigurationObserver)} * method when it is going out of scope. In case you are not able to do that * for any reason, it is still okay, since entries for dead observers are * automatically collected during GC. But nonetheless, it is still a good - * practice to deregister your observer, whenever possible. + * practice to deregister your observer, whenever possible.</li>
+ * </ol> + * </p>
    */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -118,8 +121,8 @@ public void notifyAllObservers(Configuration conf) { observer.onConfigurationChange(conf); } } catch (Throwable t) { - LOG.error("Encountered a throwable while notifying observers: " + " of type : " + - observer.getClass().getCanonicalName() + "(" + observer + ")", t); + LOG.error("Encountered a throwable while notifying observers: of type : {}({})", + observer.getClass().getCanonicalName(), observer, t); } } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java index 2370a21af033..0d1d8ce5a783 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,7 @@ /** * Every class that wants to observe changes in Configuration properties, * must implement interface (and also, register itself with the - * ConfigurationManager object. + * {@link ConfigurationManager}. */ @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java index 20dd02442631..21d74806ba04 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -39,9 +38,9 @@ public class TestConfigurationManager { private static final Logger LOG = LoggerFactory.getLogger(TestConfigurationManager.class); - class DummyConfigurationObserver implements ConfigurationObserver { + static class DummyConfigurationObserver implements ConfigurationObserver { private boolean notifiedOnChange = false; - private ConfigurationManager cm; + private final ConfigurationManager cm; public DummyConfigurationObserver(ConfigurationManager cm) { this.cm = cm; @@ -63,11 +62,11 @@ public void resetNotifiedOnChange() { } public void register() { - this.cm.registerObserver(this); + cm.registerObserver(this); } public void deregister() { - this.cm.deregisterObserver(this); + cm.deregisterObserver(this); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 58a805334f36..f9123046eef2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -785,6 +785,7 @@ private void initializeZKBasedSystemTrackers() this.regionNormalizerManager = RegionNormalizerFactory.createNormalizerManager(conf, zooKeeper, this); + this.configurationManager.registerObserver(regionNormalizerManager); this.regionNormalizerManager.start(); this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java index e818519d6513..b4d16e796731 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java @@ -22,8 +22,11 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -35,7 +38,7 @@ * This class encapsulates the details of the {@link RegionNormalizer} subsystem. 
*/ @InterfaceAudience.Private -public class RegionNormalizerManager { +public class RegionNormalizerManager implements PropagatingConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerManager.class); private final RegionNormalizerTracker regionNormalizerTracker; @@ -48,7 +51,7 @@ public class RegionNormalizerManager { private boolean started = false; private boolean stopped = false; - public RegionNormalizerManager( + RegionNormalizerManager( @NonNull final RegionNormalizerTracker regionNormalizerTracker, @Nullable final RegionNormalizerChore regionNormalizerChore, @Nullable final RegionNormalizerWorkQueue workQueue, @@ -67,6 +70,25 @@ public RegionNormalizerManager( .build()); } + @Override + public void registerChildren(ConfigurationManager manager) { + if (worker != null) { + manager.registerObserver(worker); + } + } + + @Override + public void deregisterChildren(ConfigurationManager manager) { + if (worker != null) { + manager.deregisterObserver(worker); + } + } + + @Override + public void onConfigurationChange(Configuration conf) { + // no configuration managed here directly. + } + public void start() { synchronized (startStopLock) { if (started) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java index 30f9fc25364d..408317a31f87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java @@ -26,6 +26,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -39,7 +42,7 @@ * and executes the resulting {@link NormalizationPlan}s. 
*/ @InterfaceAudience.Private -class RegionNormalizerWorker implements Runnable { +class RegionNormalizerWorker implements PropagatingConfigurationObserver, Runnable { private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerWorker.class); static final String RATE_LIMIT_BYTES_PER_SEC_KEY = @@ -70,7 +73,32 @@ class RegionNormalizerWorker implements Runnable { this.rateLimiter = loadRateLimiter(configuration); } + @Override + public void registerChildren(ConfigurationManager manager) { + if (regionNormalizer instanceof ConfigurationObserver) { + final ConfigurationObserver observer = (ConfigurationObserver) regionNormalizer; + manager.registerObserver(observer); + } + } + + @Override + public void deregisterChildren(ConfigurationManager manager) { + if (regionNormalizer instanceof ConfigurationObserver) { + final ConfigurationObserver observer = (ConfigurationObserver) regionNormalizer; + manager.deregisterObserver(observer); + } + } + + @Override + public void onConfigurationChange(Configuration conf) { + rateLimiter.setRate(loadRateLimit(conf)); + } + private static RateLimiter loadRateLimiter(final Configuration configuration) { + return RateLimiter.create(loadRateLimit(configuration)); + } + + private static long loadRateLimit(final Configuration configuration) { long rateLimitBytes = configuration.getLongBytes(RATE_LIMIT_BYTES_PER_SEC_KEY, RATE_UNLIMITED_BYTES); long rateLimitMbs = rateLimitBytes / 1_000_000L; @@ -82,7 +110,7 @@ private static RateLimiter loadRateLimiter(final Configuration configuration) { } LOG.info("Normalizer rate limit set to {}", rateLimitBytes == RATE_UNLIMITED_BYTES ? "unlimited" : rateLimitMbs + " MB/sec"); - return RateLimiter.create(rateLimitMbs); + return rateLimitMbs; } /** @@ -116,6 +144,15 @@ long getMergePlanCount() { return mergePlanCount; } + /** + * Used in test only. This field is exposed to the test, as opposed to tracking the current + * configuration value beside the RateLimiter instance and managing synchronization to keep the + * two in sync. 
+ */ + RateLimiter getRateLimiter() { + return rateLimiter; + } + @Override public void run() { while (true) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 062e401ba812..6d7387b7f11b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.assignment.RegionStates; @@ -56,7 +57,7 @@ * */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -class SimpleRegionNormalizer implements RegionNormalizer { +class SimpleRegionNormalizer implements RegionNormalizer, ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class); static final String SPLIT_ENABLED_KEY = "hbase.normalizer.split.enabled"; @@ -72,25 +73,17 @@ class SimpleRegionNormalizer implements RegionNormalizer { static final String MERGE_MIN_REGION_SIZE_MB_KEY = "hbase.normalizer.merge.min_region_size.mb"; static final int DEFAULT_MERGE_MIN_REGION_SIZE_MB = 1; - private Configuration conf; private MasterServices masterServices; - private boolean splitEnabled; - private boolean mergeEnabled; - private int minRegionCount; - private Period mergeMinRegionAge; - private long mergeMinRegionSizeMb; + private NormalizerConfiguration normalizerConfiguration; public SimpleRegionNormalizer() { - splitEnabled = DEFAULT_SPLIT_ENABLED; - mergeEnabled = DEFAULT_MERGE_ENABLED; - minRegionCount = DEFAULT_MIN_REGION_COUNT; - mergeMinRegionAge = Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS); - mergeMinRegionSizeMb = DEFAULT_MERGE_MIN_REGION_SIZE_MB; + masterServices = null; + normalizerConfiguration = new NormalizerConfiguration(); } @Override public Configuration getConf() { - return conf; + return normalizerConfiguration.getConf(); } @Override @@ -98,12 +91,13 @@ public void setConf(final Configuration conf) { if (conf == null) { return; } - this.conf = conf; - splitEnabled = conf.getBoolean(SPLIT_ENABLED_KEY, DEFAULT_SPLIT_ENABLED); - mergeEnabled = conf.getBoolean(MERGE_ENABLED_KEY, DEFAULT_MERGE_ENABLED); - minRegionCount = parseMinRegionCount(conf); - mergeMinRegionAge = parseMergeMinRegionAge(conf); - mergeMinRegionSizeMb = parseMergeMinRegionSizeMb(conf); + normalizerConfiguration = new NormalizerConfiguration(conf, normalizerConfiguration); + } + + @Override + public void onConfigurationChange(Configuration conf) { + LOG.debug("Updating configuration parameters according to new configuration instance."); + setConf(conf); } private static int parseMinRegionCount(final Configuration conf) { @@ -141,39 +135,46 @@ private static void warnInvalidValue(final String key, final T parsedValue, key, parsedValue, settledValue); } + private static void logConfigurationUpdated(final String key, final T oldValue, + final T newValue) { + if (!Objects.equals(oldValue, newValue)) { + LOG.info("Updated configuration for key '{}' from {} to {}", key, oldValue, newValue); + } + } + /** * Return this 
instance's configured value for {@value #SPLIT_ENABLED_KEY}. */ public boolean isSplitEnabled() { - return splitEnabled; + return normalizerConfiguration.isSplitEnabled(); } /** * Return this instance's configured value for {@value #MERGE_ENABLED_KEY}. */ public boolean isMergeEnabled() { - return mergeEnabled; + return normalizerConfiguration.isMergeEnabled(); } /** * Return this instance's configured value for {@value #MIN_REGION_COUNT_KEY}. */ public int getMinRegionCount() { - return minRegionCount; + return normalizerConfiguration.getMinRegionCount(); } /** * Return this instance's configured value for {@value #MERGE_MIN_REGION_AGE_DAYS_KEY}. */ public Period getMergeMinRegionAge() { - return mergeMinRegionAge; + return normalizerConfiguration.getMergeMinRegionAge(); } /** * Return this instance's configured value for {@value #MERGE_MIN_REGION_SIZE_MB_KEY}. */ public long getMergeMinRegionSizeMb() { - return mergeMinRegionSizeMb; + return normalizerConfiguration.getMergeMinRegionSizeMb(); } @Override @@ -292,8 +293,15 @@ private double getAverageRegionSizeMb(final List tableRegions) { /** * Determine if a {@link RegionInfo} should be considered for a merge operation. + *
<p>
    + * Callers beware: for safe concurrency, be sure to pass in the local instance of + * {@link NormalizerConfiguration}, don't use {@code this}'s instance. */ - private boolean skipForMerge(final RegionStates regionStates, final RegionInfo regionInfo) { + private boolean skipForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionStates regionStates, + final RegionInfo regionInfo + ) { final RegionState state = regionStates.getRegionState(regionInfo); final String name = regionInfo.getEncodedName(); return @@ -304,10 +312,10 @@ private boolean skipForMerge(final RegionStates regionStates, final RegionInfo r () -> !Objects.equals(state.getState(), RegionState.State.OPEN), "skipping merge of region {} because it is not open.", name) || logTraceReason( - () -> !isOldEnoughForMerge(regionInfo), + () -> !isOldEnoughForMerge(normalizerConfiguration, regionInfo), "skipping merge of region {} because it is not old enough.", name) || logTraceReason( - () -> !isLargeEnoughForMerge(regionInfo), + () -> !isLargeEnoughForMerge(normalizerConfiguration, regionInfo), "skipping merge region {} because it is not large enough.", name); } @@ -316,15 +324,16 @@ private boolean skipForMerge(final RegionStates regionStates, final RegionInfo r * towards target average or target region count. */ private List computeMergeNormalizationPlans(final NormalizeContext ctx) { - if (isEmpty(ctx.getTableRegions()) || ctx.getTableRegions().size() < minRegionCount) { + final NormalizerConfiguration configuration = normalizerConfiguration; + if (ctx.getTableRegions().size() < configuration.getMinRegionCount()) { LOG.debug("Table {} has {} regions, required min number of regions for normalizer to run" - + " is {}, not computing merge plans.", ctx.getTableName(), ctx.getTableRegions().size(), - minRegionCount); + + " is {}, not computing merge plans.", ctx.getTableName(), + ctx.getTableRegions().size(), configuration.getMinRegionCount()); return Collections.emptyList(); } final long avgRegionSizeMb = (long) ctx.getAverageRegionSizeMb(); - if (avgRegionSizeMb < mergeMinRegionSizeMb) { + if (avgRegionSizeMb < configuration.getMergeMinRegionSizeMb()) { return Collections.emptyList(); } LOG.debug("Computing normalization plan for table {}. average region size: {}, number of" @@ -347,7 +356,7 @@ private List computeMergeNormalizationPlans(final NormalizeCo for (current = rangeStart; current < ctx.getTableRegions().size(); current++) { final RegionInfo regionInfo = ctx.getTableRegions().get(current); final long regionSizeMb = getRegionSizeMB(regionInfo); - if (skipForMerge(ctx.getRegionStates(), regionInfo)) { + if (skipForMerge(configuration, ctx.getRegionStates(), regionInfo)) { // this region cannot participate in a range. resume the outer loop. rangeStart = Math.max(current, rangeStart + 1); break; @@ -419,18 +428,28 @@ private List computeSplitNormalizationPlans(final NormalizeCo * Return {@code true} when {@code regionInfo} has a creation date that is old * enough to be considered for a merge operation, {@code false} otherwise. 
*/ - private boolean isOldEnoughForMerge(final RegionInfo regionInfo) { + private static boolean isOldEnoughForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionInfo regionInfo + ) { final Instant currentTime = Instant.ofEpochMilli(EnvironmentEdgeManager.currentTime()); final Instant regionCreateTime = Instant.ofEpochMilli(regionInfo.getRegionId()); - return currentTime.isAfter(regionCreateTime.plus(mergeMinRegionAge)); + return currentTime.isAfter( + regionCreateTime.plus(normalizerConfiguration.getMergeMinRegionAge())); } /** * Return {@code true} when {@code regionInfo} has a size that is sufficient * to be considered for a merge operation, {@code false} otherwise. + *
<p>
    + * Callers beware: for safe concurrency, be sure to pass in the local instance of + * {@link NormalizerConfiguration}, don't use {@code this}'s instance. */ - private boolean isLargeEnoughForMerge(final RegionInfo regionInfo) { - return getRegionSizeMB(regionInfo) >= mergeMinRegionSizeMb; + private boolean isLargeEnoughForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionInfo regionInfo + ) { + return getRegionSizeMB(regionInfo) >= normalizerConfiguration.getMergeMinRegionSizeMb(); } private static boolean logTraceReason(final BooleanSupplier predicate, final String fmtWhenTrue, @@ -442,6 +461,74 @@ private static boolean logTraceReason(final BooleanSupplier predicate, final Str return value; } + /** + * Holds the configuration values read from {@link Configuration}. Encapsulation in a POJO + * enables atomic hot-reloading of configs without locks. + */ + private static final class NormalizerConfiguration { + private final Configuration conf; + private final boolean splitEnabled; + private final boolean mergeEnabled; + private final int minRegionCount; + private final Period mergeMinRegionAge; + private final long mergeMinRegionSizeMb; + + private NormalizerConfiguration() { + conf = null; + splitEnabled = DEFAULT_SPLIT_ENABLED; + mergeEnabled = DEFAULT_MERGE_ENABLED; + minRegionCount = DEFAULT_MIN_REGION_COUNT; + mergeMinRegionAge = Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS); + mergeMinRegionSizeMb = DEFAULT_MERGE_MIN_REGION_SIZE_MB; + } + + private NormalizerConfiguration( + final Configuration conf, + final NormalizerConfiguration currentConfiguration + ) { + this.conf = conf; + splitEnabled = conf.getBoolean(SPLIT_ENABLED_KEY, DEFAULT_SPLIT_ENABLED); + mergeEnabled = conf.getBoolean(MERGE_ENABLED_KEY, DEFAULT_MERGE_ENABLED); + minRegionCount = parseMinRegionCount(conf); + mergeMinRegionAge = parseMergeMinRegionAge(conf); + mergeMinRegionSizeMb = parseMergeMinRegionSizeMb(conf); + logConfigurationUpdated(SPLIT_ENABLED_KEY, currentConfiguration.isSplitEnabled(), + splitEnabled); + logConfigurationUpdated(MERGE_ENABLED_KEY, currentConfiguration.isMergeEnabled(), + mergeEnabled); + logConfigurationUpdated(MIN_REGION_COUNT_KEY, currentConfiguration.getMinRegionCount(), + minRegionCount); + logConfigurationUpdated(MERGE_MIN_REGION_AGE_DAYS_KEY, + currentConfiguration.getMergeMinRegionAge(), mergeMinRegionAge); + logConfigurationUpdated(MERGE_MIN_REGION_SIZE_MB_KEY, + currentConfiguration.getMergeMinRegionSizeMb(), mergeMinRegionSizeMb); + } + + public Configuration getConf() { + return conf; + } + + public boolean isSplitEnabled() { + return splitEnabled; + } + + public boolean isMergeEnabled() { + return mergeEnabled; + } + + public int getMinRegionCount() { + return minRegionCount; + } + + public Period getMergeMinRegionAge() { + return mergeMinRegionAge; + } + + public long getMergeMinRegionSizeMb() { + return mergeMinRegionSizeMb; + } + } + /** * Inner class caries the state necessary to perform a single invocation of * {@link #computePlansForTable(TableName)}. 
Grabbing this data from the assignment manager diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java new file mode 100644 index 000000000000..00980233edce --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.RateLimiter; + +/** + * Test that configuration changes are propagated to all children. 
+ */ +@Category({ MasterTests.class, SmallTests.class}) +public class TestRegionNormalizerManagerConfigurationObserver { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionNormalizerManagerConfigurationObserver.class); + + private static final HBaseTestingUtility testUtil = new HBaseTestingUtility(); + private static final Pattern rateLimitPattern = + Pattern.compile("RateLimiter\\[stableRate=(?.+)qps]"); + + private Configuration conf; + private SimpleRegionNormalizer normalizer; + @Mock private MasterServices masterServices; + @Mock private RegionNormalizerTracker tracker; + @Mock private RegionNormalizerChore chore; + @Mock private RegionNormalizerWorkQueue queue; + private RegionNormalizerWorker worker; + private ConfigurationManager configurationManager; + + @Before + public void before() { + MockitoAnnotations.initMocks(this); + conf = testUtil.getConfiguration(); + normalizer = new SimpleRegionNormalizer(); + worker = new RegionNormalizerWorker(conf, masterServices, normalizer, queue); + final RegionNormalizerManager normalizerManager = + new RegionNormalizerManager(tracker, chore, queue, worker); + configurationManager = new ConfigurationManager(); + configurationManager.registerObserver(normalizerManager); + } + + @Test + public void test() { + assertTrue(normalizer.isMergeEnabled()); + assertEquals(3, normalizer.getMinRegionCount()); + assertEquals(1_000_000L, parseConfiguredRateLimit(worker.getRateLimiter())); + + final Configuration newConf = new Configuration(conf); + // configs on SimpleRegionNormalizer + newConf.setBoolean("hbase.normalizer.merge.enabled", false); + newConf.setInt("hbase.normalizer.min.region.count", 100); + // config on RegionNormalizerWorker + newConf.set("hbase.normalizer.throughput.max_bytes_per_sec", "12g"); + + configurationManager.notifyAllObservers(newConf); + assertFalse(normalizer.isMergeEnabled()); + assertEquals(100, normalizer.getMinRegionCount()); + assertEquals(12_884L, parseConfiguredRateLimit(worker.getRateLimiter())); + } + + /** + * The {@link RateLimiter} class does not publicly expose its currently configured rate. It does + * offer this information in the {@link RateLimiter#toString()} method. It's fragile, but parse + * this value. The alternative would be to track the value explicitly in the worker, and the + * associated coordination overhead paid at runtime. See the related note on + * {@link RegionNormalizerWorker#getRateLimiter()}. + */ + private static long parseConfiguredRateLimit(final RateLimiter rateLimiter) { + final String val = rateLimiter.toString(); + final Matcher matcher = rateLimitPattern.matcher(val); + assertTrue(matcher.matches()); + final String parsedRate = matcher.group("rate"); + return (long) Double.parseDouble(parsedRate); + } +} From 85dfd9a349e5aa5c4f3c6773df1c2103a8fa56aa Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 2 Nov 2020 20:12:39 +0530 Subject: [PATCH 161/769] HBASE-25218 : Add 2.3.3 to the downloads page Closes #2615 Signed-off-by: Jan Hentschel --- src/site/xdoc/downloads.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 37ea25af5e75..bbd60e5e2e4a 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -45,24 +45,24 @@ under the License. - 2.3.2 + 2.3.3 - 2020/09/25 + 2020/11/02 - 2.3.1 vs 2.3.2 + 2.3.2 vs 2.3.3 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc) + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc) From 7a41247ef7e4dd4d8eb0521a7c3cae3a5fbff7f4 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Mon, 2 Nov 2020 13:11:09 -0800 Subject: [PATCH 162/769] HBASE-25228 Delete dev-support/jenkins_precommit_jira_yetus.sh (#2611) Signed-off-by: Viraj Jasani Signed-off-by: Jan Hentschel --- dev-support/jenkins_precommit_jira_yetus.sh | 182 -------------------- 1 file changed, 182 deletions(-) delete mode 100755 dev-support/jenkins_precommit_jira_yetus.sh diff --git a/dev-support/jenkins_precommit_jira_yetus.sh b/dev-support/jenkins_precommit_jira_yetus.sh deleted file mode 100755 index d721ac8faba6..000000000000 --- a/dev-support/jenkins_precommit_jira_yetus.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -if [[ "true" = "${DEBUG}" ]]; then - set -x - printenv -fi - -##To set jenkins Environment Variables: -export TOOLS_HOME=/home/jenkins/tools -#export JAVA_HOME=${JAVA_HOME_HADOOP_MACHINES_HOME} -export FINDBUGS_HOME=${TOOLS_HOME}/findbugs/latest -export CLOVER_HOME=${TOOLS_HOME}/clover/latest -#export MAVEN_HOME=${MAVEN_3_0_4_HOME} -export MAVEN_HOME=/home/jenkins/tools/maven/apache-maven-3.0.5 - -#export PATH=$PATH:${JAVA_HOME}/bin:${MAVEN_HOME}/bin: -export PATH=$PATH:${MAVEN_HOME}/bin: - -YETUS_RELEASE=0.12.0 -COMPONENT=${WORKSPACE}/component -TEST_FRAMEWORK=${WORKSPACE}/test_framework - -PATCHPROCESS=${WORKSPACE}/patchprocess -if [[ -d ${PATCHPROCESS} ]]; then - echo "[WARN] patch process already existed '${PATCHPROCESS}'" - rm -rf "${PATCHPROCESS}" -fi -mkdir -p "${PATCHPROCESS}" - - -## Checking on H* machine nonsense -echo "JAVA_HOME: ${JAVA_HOME}" -ls -l "${JAVA_HOME}" || true -echo "MAVEN_HOME: ${MAVEN_HOME}" -echo "maven version:" -mvn --offline --version || true -echo "getting machine specs, find in ${BUILD_URL}/artifact/patchprocess/machine/" -mkdir "${PATCHPROCESS}/machine" -cat /proc/cpuinfo >"${PATCHPROCESS}/machine/cpuinfo" 2>&1 || true -cat /proc/meminfo >"${PATCHPROCESS}/machine/meminfo" 2>&1 || true -cat /proc/diskstats >"${PATCHPROCESS}/machine/diskstats" 2>&1 || true -cat /sys/block/sda/stat >"${PATCHPROCESS}/machine/sys-block-sda-stat" 2>&1 || true -df -h >"${PATCHPROCESS}/machine/df-h" 2>&1 || true -ps -Awwf >"${PATCHPROCESS}/machine/ps-Awwf" 2>&1 || true -ifconfig -a >"${PATCHPROCESS}/machine/ifconfig-a" 2>&1 || true -lsblk -ta >"${PATCHPROCESS}/machine/lsblk-ta" 2>&1 || true -lsblk -fa >"${PATCHPROCESS}/machine/lsblk-fa" 2>&1 || true -cat /proc/loadavg >"${PATCHPROCESS}/loadavg" 2>&1 || true -ulimit -a >"${PATCHPROCESS}/machine/ulimit-a" 2>&1 || true - -## /H* - -### Download Yetus -if [[ "true" != "${USE_YETUS_PRERELEASE}" ]]; then - if [ ! 
-d "${TEST_FRAMEWORK}/yetus-${YETUS_RELEASE}" ]; then - mkdir -p "${TEST_FRAMEWORK}" - cd "${TEST_FRAMEWORK}" || exit 1 - # clear out any cached 'use a prerelease' versions - rm -rf apache-yetus-* - - mkdir -p "${TEST_FRAMEWORK}/.gpg" - chmod -R 700 "${TEST_FRAMEWORK}/.gpg" - - curl -L --fail -o "${TEST_FRAMEWORK}/KEYS_YETUS" https://dist.apache.org/repos/dist/release/yetus/KEYS - gpg --homedir "${TEST_FRAMEWORK}/.gpg" --import "${TEST_FRAMEWORK}/KEYS_YETUS" - - ## Release - curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/apache-yetus-${YETUS_RELEASE}-bin.tar.gz" - curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/apache-yetus-${YETUS_RELEASE}-bin.tar.gz.asc" - gpg --homedir "${TEST_FRAMEWORK}/.gpg" --verify "apache-yetus-${YETUS_RELEASE}-bin.tar.gz.asc" - tar xzpf "apache-yetus-${YETUS_RELEASE}-bin.tar.gz" - fi - TESTPATCHBIN=${TEST_FRAMEWORK}/apache-yetus-${YETUS_RELEASE}/bin/test-patch - TESTPATCHLIB=${TEST_FRAMEWORK}/apache-yetus-${YETUS_RELEASE}/lib/precommit -else - prerelease_dirs=("${TEST_FRAMEWORK}/${YETUS_PRERELEASE_GITHUB/\//-}-*") - if [ ! -d "${prerelease_dirs[0]}" ]; then - mkdir -p "${TEST_FRAMEWORK}" - cd "${TEST_FRAMEWORK}" || exit - ## from github - curl -L --fail "https://api.github.com/repos/${YETUS_PRERELEASE_GITHUB}/tarball/HEAD" > yetus.tar.gz - tar xvpf yetus.tar.gz - prerelease_dirs=("${TEST_FRAMEWORK}/${YETUS_PRERELEASE_GITHUB/\//-}-*") - fi - TESTPATCHBIN=${prerelease_dirs[0]}/precommit/test-patch.sh - TESTPATCHLIB=${prerelease_dirs[0]}/precommit -fi - -if [[ "true" = "${DEBUG}" ]]; then - # DEBUG print the test framework - ls -l "${TESTPATCHBIN}" - ls -la "${TESTPATCHLIB}/test-patch.d/" - # DEBUG print the local customization - if [ -d "${COMPONENT}/dev-support/test-patch.d" ]; then - ls -la "${COMPONENT}/dev-support/test-patch.d/" - fi - YETUS_ARGS=(--debug "${YETUS_ARGS[@]}") -fi - - -if [ ! -x "${TESTPATCHBIN}" ] && [ -n "${TEST_FRAMEWORK}" ] && [ -d "${TEST_FRAMEWORK}" ]; then - echo "Something is amiss with the test framework; removing it. please re-run." - rm -rf "${TEST_FRAMEWORK}" - exit 1 -fi - -cd "${WORKSPACE}" || exit - - -# -# Yetus *always* builds with JAVA_HOME, so no need to list it. -# -# non-docker-mode JDK: -# --findbugs-home=/home/jenkins/tools/findbugs/latest \ - -# docker-mode: (openjdk 7 added for free) -# --findbugs-home=/usr \ -# --docker \ -# --multijdkdirs="/usr/lib/jvm/java-8-openjdk-amd64" \ - -if [[ "true" = "${RUN_IN_DOCKER}" ]]; then - YETUS_ARGS=( - --docker \ - "--multijdkdirs=/usr/lib/jvm/java-8-openjdk-amd64" \ - "--findbugs-home=/usr" \ - "${YETUS_ARGS[@]}" \ - ) - if [ -r "${COMPONENT}/dev-support/docker/Dockerfile" ]; then - YETUS_ARGS=("--dockerfile=${COMPONENT}/dev-support/docker/Dockerfile" "${YETUS_ARGS[@]}") - fi -else - YETUS_ARGS=("--findbugs-home=/home/jenkins/tools/findbugs/latest" "${YETUS_ARGS[@]}") -fi - -if [ -d "${COMPONENT}/dev-support/test-patch.d" ]; then - YETUS_ARGS=("--user-plugins=${COMPONENT}/dev-support/test-patch.d" "${YETUS_ARGS[@]}") -fi - -# I don't trust Yetus compat enough yet, so in prerelease mode, skip our personality. -# this should give us an incentive to update the Yetus exemplar for HBase periodically. 
-if [ -r "${COMPONENT}/dev-support/hbase-personality.sh" ] && [[ "true" != "${USE_YETUS_PRERELEASE}" ]] ; then - YETUS_ARGS=("--personality=${COMPONENT}/dev-support/hbase-personality.sh" "${YETUS_ARGS[@]}") -fi - -if [[ true == "${QUICK_HADOOPCHECK}" ]]; then - YETUS_ARGS=("--quick-hadoopcheck" "${YETUS_ARGS[@]}") -fi - -if [[ true == "${SKIP_ERRORPRONE}" ]]; then - YETUS_ARGS=("--skip-errorprone" "${YETUS_ARGS[@]}") -fi - -YETUS_ARGS=("--skip-dirs=dev-support" "${YETUS_ARGS[@]}") - -/bin/bash "${TESTPATCHBIN}" \ - "${YETUS_ARGS[@]}" \ - --patch-dir="${PATCHPROCESS}" \ - --basedir="${COMPONENT}" \ - --mvn-custom-repos \ - --whitespace-eol-ignore-list=".*/generated/.*" \ - --whitespace-tabs-ignore-list=".*/generated/.*" \ - --jira-user=HBaseQA \ - --jira-password="${JIRA_PASSWORD}" \ - "HBASE-${ISSUE_NUM}" - -find "${COMPONENT}" -name target -exec chmod -R u+w {} \; From 5abbda196912bea7f1349d5549d09ff881b1d3cb Mon Sep 17 00:00:00 2001 From: Sandeep Pal <50725353+sandeepvinayak@users.noreply.github.com> Date: Tue, 3 Nov 2020 07:39:23 -0800 Subject: [PATCH 163/769] HBASE-24859: Optimize in-memory representation of HBase map reduce table splits (#2609) Patch fixes the single table input format case. Signed-off-by: Bharath Vissapragada --- .../hbase/mapreduce/TableInputFormatBase.java | 35 +++++++++++++------ .../hadoop/hbase/mapreduce/TableSplit.java | 34 ++++++++++++++---- .../TestTableInputFormatScanBase.java | 17 +++++---- 3 files changed, 62 insertions(+), 24 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index e7c5bf4fb2d7..8baf85ffb4d9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -26,10 +26,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; @@ -52,6 +48,9 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.net.DNS; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -287,7 +286,7 @@ public List getSplits(JobContext context) throws IOException { * Create one InputSplit per region * * @return The list of InputSplit for all the regions - * @throws IOException + * @throws IOException throws IOException */ private List oneInputSplitPerRegion() throws IOException { RegionSizeCalculator sizeCalculator = @@ -305,7 +304,10 @@ private List oneInputSplitPerRegion() throws IOException { } List splits = new ArrayList<>(1); long regionSize = sizeCalculator.getRegionSize(regLoc.getRegion().getRegionName()); - TableSplit split = new TableSplit(tableName, scan, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. 
See HBASE-25212 + TableSplit split = new TableSplit(tableName, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); splits.add(split); @@ -345,7 +347,10 @@ private List oneInputSplitPerRegion() throws IOException { byte[] regionName = location.getRegion().getRegionName(); String encodedRegionName = location.getRegion().getEncodedName(); long regionSize = sizeCalculator.getRegionSize(regionName); - TableSplit split = new TableSplit(tableName, scan, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 + TableSplit split = new TableSplit(tableName, null, splitStart, splitStop, regionLocation, encodedRegionName, regionSize); splits.add(split); if (LOG.isDebugEnabled()) { @@ -362,7 +367,7 @@ private List oneInputSplitPerRegion() throws IOException { * @param n Number of ranges after splitting. Pass 1 means no split for the range * Pass 2 if you want to split the range in two; * @return A list of TableSplit, the size of the list is n - * @throws IllegalArgumentIOException + * @throws IllegalArgumentIOException throws IllegalArgumentIOException */ protected List createNInputSplitsUniform(InputSplit split, int n) throws IllegalArgumentIOException { @@ -409,9 +414,12 @@ protected List createNInputSplitsUniform(InputSplit split, int n) // Split Region into n chunks evenly byte[][] splitKeys = Bytes.split(startRow, endRow, true, n-1); for (int i = 0; i < splitKeys.length - 1; i++) { + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 //notice that the regionSize parameter may be not very accurate TableSplit tsplit = - new TableSplit(tableName, scan, splitKeys[i], splitKeys[i + 1], regionLocation, + new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], regionLocation, encodedRegionName, regionSize / n); res.add(tsplit); } @@ -488,7 +496,10 @@ public List calculateAutoBalancedSplits(List splits, lon } } i = j - 1; - TableSplit t = new TableSplit(tableName, scan, splitStartKey, splitEndKey, regionLocation, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 + TableSplit t = new TableSplit(tableName, null, splitStartKey, splitEndKey, regionLocation, encodedRegionName, totalSize); resultList.add(t); } @@ -508,7 +519,9 @@ String reverseDNS(InetAddress ipAddress) throws UnknownHostException { // reverse DNS using jndi doesn't work well with ipv6 addresses. 
ipAddressString = InetAddress.getByName(ipAddress.getHostAddress()).getHostName(); } - if (ipAddressString == null) throw new UnknownHostException("No host found for " + ipAddress); + if (ipAddressString == null) { + throw new UnknownHostException("No host found for " + ipAddress); + } hostName = Strings.domainNamePointerToHostName(ipAddressString); this.reverseDNSCacheMap.put(ipAddress, hostName); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index de42c31678ef..acce55e82ce8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -22,17 +22,16 @@ import java.io.DataOutput; import java.io.IOException; import java.util.Arrays; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A table split corresponds to a key range (low, high) and an optional scanner. @@ -40,7 +39,7 @@ */ @InterfaceAudience.Public public class TableSplit extends InputSplit -implements Writable, Comparable { + implements Writable, Comparable { /** @deprecated LOG variable would be made private. fix in hbase 3.0 */ @Deprecated public static final Logger LOG = LoggerFactory.getLogger(TableSplit.class); @@ -84,6 +83,16 @@ static Version fromCode(int code) { private byte [] endRow; private String regionLocation; private String encodedRegionName = ""; + + /** + * The scan object may be null but the serialized form of scan is never null + * or empty since we serialize the scan object with default values then. + * Having no scanner in TableSplit doesn't necessarily mean there is no scanner + * for mapreduce job, it just means that we do not need to set it for each split. + * For example, it is not required to have a scan object for + * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the + * job conf and scanner is supposed to be same for all the splits of table. + */ private String scan = ""; // stores the serialized form of the Scan private long length; // Contains estimation of region size in bytes @@ -182,12 +191,23 @@ public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, * Returns a Scan object from the stored string representation. * * @return Returns a Scan object based on the stored scanner. - * @throws IOException + * @throws IOException throws IOException if deserialization fails */ public Scan getScan() throws IOException { return TableMapReduceUtil.convertStringToScan(this.scan); } + /** + * Returns a scan string + * @return scan as string. Should be noted that this is not same as getScan().toString() + * because Scan object will have the default values when empty scan string is + * deserialized. Thus, getScan().toString() can never be empty + */ + @InterfaceAudience.Private + public String getScanAsString() { + return this.scan; + } + /** * Returns the table name converted to a byte array. 
* @see #getTable() diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index 4b18624f9241..7855747b1664 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; - import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -86,7 +85,7 @@ public static void tearDownAfterClass() throws Exception { * Pass the key and value to reduce. */ public static class ScanMapper - extends TableMapper { + extends TableMapper { /** * Pass the key and value to reduce. @@ -99,7 +98,7 @@ public static class ScanMapper @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { if (value.size() != 2) { throw new IOException("There should be two input columns"); } @@ -123,7 +122,7 @@ public void map(ImmutableBytesWritable key, Result value, * Checks the last and first key seen against the scanner boundaries. */ public static class ScanReducer - extends Reducer { private String first = null; @@ -131,7 +130,7 @@ public static class ScanReducer protected void reduce(ImmutableBytesWritable key, Iterable values, Context context) - throws IOException ,InterruptedException { + throws IOException ,InterruptedException { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); @@ -144,7 +143,7 @@ protected void reduce(ImmutableBytesWritable key, } protected void cleanup(Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Configuration c = context.getConfiguration(); String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); @@ -249,6 +248,12 @@ protected void testNumOfSplits(int splitsPerRegion, int expectedNumOfSplits) tif.setConf(job.getConfiguration()); Assert.assertEquals(TABLE_NAME, table.getName()); List splits = tif.getSplits(job); + for (InputSplit split : splits) { + TableSplit tableSplit = (TableSplit) split; + // In table input format, we do no store the scanner at the split level + // because we use the scan object from the map-reduce job conf itself. 
+ Assert.assertTrue(tableSplit.getScanAsString().isEmpty()); + } Assert.assertEquals(expectedNumOfSplits, splits.size()); } From 1eceab69b5d5e6977861b482fdd300d47e2807fc Mon Sep 17 00:00:00 2001 From: niuyulin Date: Wed, 4 Nov 2020 01:49:38 +0800 Subject: [PATCH 164/769] HBASE-25210 RegionInfo.isOffline is now a duplication with RegionInfo.isSplit (#2580) Signed-off-by: Duo Zhang Signed-off-by: stack --- .../org/apache/hadoop/hbase/client/MutableRegionInfo.java | 6 ++++++ .../java/org/apache/hadoop/hbase/client/RegionInfo.java | 6 ++++++ .../org/apache/hadoop/hbase/client/RegionInfoBuilder.java | 1 + 3 files changed, 13 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 5d48991cf205..028608db614d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -254,8 +254,11 @@ public MutableRegionInfo setSplit(boolean split) { /** * @return True if this region is offline. + * @deprecated since 3.0.0 and will be removed in 4.0.0 + * @see HBASE-25210 */ @Override + @Deprecated public boolean isOffline() { return this.offLine; } @@ -273,8 +276,11 @@ public MutableRegionInfo setOffline(boolean offLine) { /** * @return True if this is a split parent region. + * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. + * @see HBASE-25210 */ @Override + @Deprecated public boolean isSplitParent() { if (!isSplit()) { return false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index d860c7681a37..d7460e9d15ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -218,12 +218,18 @@ public interface RegionInfo extends Comparable { /** * @return True if this region is offline. + * @deprecated since 3.0.0 and will be removed in 4.0.0 + * @see HBASE-25210 */ + @Deprecated boolean isOffline(); /** * @return True if this is a split parent region. + * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. + * @see HBASE-25210 */ + @Deprecated boolean isSplitParent(); /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java index a9e7806ad9d3..cbf9e4a3c219 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java @@ -100,6 +100,7 @@ public RegionInfoBuilder setSplit(boolean split) { return this; } + @Deprecated public RegionInfoBuilder setOffline(boolean offLine) { this.offLine = offLine; return this; From c98e993b238f527fb8cdc7f8a03a3555bb1dc74a Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Tue, 3 Nov 2020 15:20:27 -0800 Subject: [PATCH 165/769] HBASE-25212 Optionally abort requests in progress after deciding a region should close (#2574) If hbase.regionserver.close.wait.abort is set to true, interrupt RPC handler threads holding the region close lock. 
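For illustration only (not part of this patch), the close/abort knobs introduced by this change could be exercised programmatically as follows; the property keys come from the patch, the values are just example settings:

    Configuration conf = HBaseConfiguration.create();
    // Interrupt in-flight handlers and, if the close lock still cannot be taken, abort.
    conf.setBoolean("hbase.regionserver.close.wait.abort", true);
    // Upper bound on how long a close will wait for the region close lock (ms).
    conf.setLong("hbase.regionserver.close.wait.time.ms", 60000);
    // How often to retry the lock and re-interrupt running handlers (ms).
    conf.setLong("hbase.regionserver.close.wait.interval.ms", 10000);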
Until requests in progress can be aborted, wait on the region close lock for a configurable interval (specified by hbase.regionserver.close.wait.time.ms, default 60000 (1 minute)). If we fail to acquire the close lock within this interval and aborting is allowed (also controlled by hbase.regionserver.close.wait.abort), abort the regionserver. We will attempt to interrupt any running handlers every hbase.regionserver.close.wait.interval.ms (default 10000 (10 seconds)) until either the close lock is acquired or we reach the maximum wait time. Define a subset of region operations as interruptible. Track threads holding the close lock while transiting those operations. Set the thread interrupt status of tracked threads when trying to close the region. Use the thread interrupt status, where safe, to break out of request processing. Signed-off-by: Bharath Vissapragada Signed-off-by: Duo Zhang Signed-off-by: Reid Chan Signed-off-by: Viraj Jasani --- .../hadoop/hbase/regionserver/HRegion.java | 301 +++++++++++++-- .../hadoop/hbase/regionserver/Region.java | 3 +- .../hadoop/hbase/HBaseTestingUtility.java | 18 +- .../TestCacheOnWriteInSchema.java | 2 +- .../regionserver/TestFailedAppendAndSync.java | 10 +- .../hbase/regionserver/TestHRegion.java | 272 +++++++++++-- .../regionserver/TestHRegionReplayEvents.java | 2 +- .../TestHRegionWithInMemoryFlush.java | 7 +- .../regionserver/TestRegionIncrement.java | 2 +- .../regionserver/TestRegionInterrupt.java | 363 ++++++++++++++++++ .../hbase/regionserver/TestWALLockup.java | 10 +- .../regionserver/wal/AbstractTestFSWAL.java | 2 +- .../hbase/regionserver/wal/TestFSHLog.java | 2 +- .../wal/WALDurabilityTestBase.java | 12 +- 14 files changed, 911 insertions(+), 95 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 57a1e1f5de93..bca18dbcb013 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -688,7 +688,7 @@ void sawNoSuchFamily() { // Last flush time for each Store. Useful when we are flushing for each column private final ConcurrentMap<byte[], Long> lastStoreFlushTimeMap = new ConcurrentHashMap<>(); - final RegionServerServices rsServices; + protected RegionServerServices rsServices; private RegionServerAccounting rsAccounting; private long flushCheckInterval; // flushPerChanges is to prevent too many changes in memstore @@ -696,6 +696,10 @@ void sawNoSuchFamily() { private long blockingMemStoreSize; // Used to guard closes final ReentrantReadWriteLock lock; + // Used to track interruptible holders of the region lock. Currently that is only RPC handler + // threads. Boolean value in map determines if lock holder can be interrupted, normally true, + // but may be false when thread is transiting a critical section.
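// Illustrative sketch (not part of the patch): the map declared below is keyed by
// RPC handler thread, with the Boolean recording whether that thread may be
// interrupted right now. Later hunks in this patch maintain it roughly as follows:
//   regionLockHolders.put(Thread.currentThread(), true);    // startRegionOperation
//   regionLockHolders.computeIfPresent(t, (k, v) -> false); // disableInterrupts()
//   regionLockHolders.computeIfPresent(t, (k, v) -> true);  // enableInterrupts()
//   regionLockHolders.remove(Thread.currentThread());       // closeRegionOperation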
+ final ConcurrentHashMap regionLockHolders; // Stop updates lock private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock(); @@ -788,6 +792,7 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR; this.lock = new ReentrantReadWriteLock(conf.getBoolean(FAIR_REENTRANT_CLOSE_LOCK, DEFAULT_FAIR_REENTRANT_CLOSE_LOCK)); + this.regionLockHolders = new ConcurrentHashMap<>(); this.flushCheckInterval = conf.getInt(MEMSTORE_PERIODIC_FLUSH_INTERVAL, DEFAULT_CACHE_FLUSH_INTERVAL); this.flushPerChanges = conf.getLong(MEMSTORE_FLUSH_PER_CHANGES, DEFAULT_FLUSH_PER_CHANGES); @@ -1174,7 +1179,7 @@ public HStore call() throws IOException { LOG.info("Setting FlushNonSloppyStoresFirstPolicy for the region=" + this); } } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw throwOnInterrupt(e); } catch (ExecutionException e) { throw new IOException(e.getCause()); } finally { @@ -1578,6 +1583,13 @@ public Map> close() throws IOException { */ public static final long MAX_FLUSH_PER_CHANGES = 1000000000; // 1G + public static final String CLOSE_WAIT_ABORT = "hbase.regionserver.close.wait.abort"; + public static final boolean DEFAULT_CLOSE_WAIT_ABORT = true; + public static final String CLOSE_WAIT_TIME = "hbase.regionserver.close.wait.time.ms"; + public static final long DEFAULT_CLOSE_WAIT_TIME = 60000; // 1 minute + public static final String CLOSE_WAIT_INTERVAL = "hbase.regionserver.close.wait.interval.ms"; + public static final long DEFAULT_CLOSE_WAIT_INTERVAL = 10000; // 10 seconds + public Map> close(boolean abort) throws IOException { return close(abort, false); } @@ -1679,22 +1691,103 @@ private Map> doClose(boolean abort, MonitoredTask statu } } - if (timeoutForWriteLock == null - || timeoutForWriteLock == Long.MAX_VALUE) { - // block waiting for the lock for closing - lock.writeLock().lock(); // FindBugs: Complains UL_UNRELEASED_LOCK_EXCEPTION_PATH but seems fine - } else { - try { - boolean succeed = lock.writeLock().tryLock(timeoutForWriteLock, TimeUnit.SECONDS); - if (!succeed) { - throw new IOException("Failed to get write lock when closing region"); + // Set the closing flag + // From this point new arrivals at the region lock will get NSRE. + + this.closing.set(true); + LOG.info("Closing region {}", this); + + // Acquire the close lock + + // The configuration parameter CLOSE_WAIT_ABORT is overloaded to enable both + // the new regionserver abort condition and interrupts for running requests. + // If CLOSE_WAIT_ABORT is not enabled there is no change from earlier behavior, + // we will not attempt to interrupt threads servicing requests nor crash out + // the regionserver if something remains stubborn. + + final boolean canAbort = conf.getBoolean(CLOSE_WAIT_ABORT, DEFAULT_CLOSE_WAIT_ABORT); + boolean useTimedWait = false; + if (timeoutForWriteLock != null && timeoutForWriteLock != Long.MAX_VALUE) { + // convert legacy use of timeoutForWriteLock in seconds to new use in millis + timeoutForWriteLock = TimeUnit.SECONDS.toMillis(timeoutForWriteLock); + useTimedWait = true; + } else if (canAbort) { + timeoutForWriteLock = conf.getLong(CLOSE_WAIT_TIME, DEFAULT_CLOSE_WAIT_TIME); + useTimedWait = true; + } + if (LOG.isDebugEnabled()) { + LOG.debug((useTimedWait ? 
"Time limited wait" : "Waiting without time limit") + + " for close lock on " + this); + } + final long closeWaitInterval = conf.getLong(CLOSE_WAIT_INTERVAL, DEFAULT_CLOSE_WAIT_INTERVAL); + long elapsedWaitTime = 0; + if (useTimedWait) { + // Sanity check configuration + long remainingWaitTime = timeoutForWriteLock; + if (remainingWaitTime < closeWaitInterval) { + LOG.warn("Time limit for close wait of " + timeoutForWriteLock + + " ms is less than the configured lock acquisition wait interval " + + closeWaitInterval + " ms, using wait interval as time limit"); + remainingWaitTime = closeWaitInterval; + } + boolean acquired = false; + do { + long start = EnvironmentEdgeManager.currentTime(); + try { + acquired = lock.writeLock().tryLock(Math.min(remainingWaitTime, closeWaitInterval), + TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + // Interrupted waiting for close lock. More likely the server is shutting down, not + // normal operation, so aborting upon interrupt while waiting on this lock would not + // provide much value. Throw an IOE (as IIOE) like we would in the case where we + // fail to acquire the lock. + String msg = "Interrupted while waiting for close lock on " + this; + LOG.warn(msg, e); + throw (InterruptedIOException) new InterruptedIOException(msg).initCause(e); + } + long elapsed = EnvironmentEdgeManager.currentTime() - start; + elapsedWaitTime += elapsed; + remainingWaitTime -= elapsed; + if (canAbort && !acquired && remainingWaitTime > 0) { + // Before we loop to wait again, interrupt all region operations that might + // still be in progress, to encourage them to break out of waiting states or + // inner loops, throw an exception to clients, and release the read lock via + // endRegionOperation. + if (LOG.isDebugEnabled()) { + LOG.debug("Interrupting region operations after waiting for close lock for " + + elapsedWaitTime + " ms on " + this + ", " + remainingWaitTime + + " ms remaining"); + } + interruptRegionOperations(); } - } catch (InterruptedException e) { - throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } while (!acquired && remainingWaitTime > 0); + + // If we fail to acquire the lock, trigger an abort if we can; otherwise throw an IOE + // to let the caller know we could not proceed with the close. 
+ if (!acquired) { + String msg = "Failed to acquire close lock on " + this + " after waiting " + + elapsedWaitTime + " ms"; + LOG.error(msg); + if (canAbort) { + // If we failed to acquire the write lock, abort the server + rsServices.abort(msg, null); + } + throw new IOException(msg); } + + } else { + + long start = EnvironmentEdgeManager.currentTime(); + lock.writeLock().lock(); + elapsedWaitTime = EnvironmentEdgeManager.currentTime() - start; + } - this.closing.set(true); - LOG.info("Closing region {}", this); + + if (LOG.isDebugEnabled()) { + LOG.debug("Acquired close lock on " + this + " after waiting " + + elapsedWaitTime + " ms"); + } + status.setStatus("Disabling writes for close"); try { if (this.isClosed()) { @@ -1782,7 +1875,7 @@ public Pair> call() throws IOException { familyFiles.addAll(storeFiles.getSecond()); } } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw throwOnInterrupt(e); } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof IOException) { @@ -4549,6 +4642,11 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { MiniBatchOperationInProgress miniBatchOp = null; /** Keep track of the locks we hold so we can release them in finally clause */ List acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.size()); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + try { // STEP 1. Try to acquire as many locks as we can and build mini-batch of operations with // locked rows @@ -4562,20 +4660,31 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { return; } + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. Do it before we take the lock and disable interrupts for + // the WAL append. + checkInterrupt(); + lock(this.updatesLock.readLock(), miniBatchOp.getReadyToWriteCount()); locked = true; + // From this point until memstore update this operation should not be interrupted. + disableInterrupts(); + // STEP 2. Update mini batch of all operations in progress with LATEST_TIMESTAMP timestamp // We should record the timestamp only after we have acquired the rowLock, // otherwise, newer puts/deletes/increment/append are not guaranteed to have a newer // timestamp + long now = EnvironmentEdgeManager.currentTime(); batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks); // STEP 3. Build WAL edit + List> walEdits = batchOp.buildWALEdits(miniBatchOp); // STEP 4. Append the WALEdits to WAL and sync. + for(Iterator> it = walEdits.iterator(); it.hasNext();) { Pair nonceKeyWALEditPair = it.next(); walEdit = nonceKeyWALEditPair.getSecond(); @@ -4611,6 +4720,8 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { } releaseRowLocks(acquiredRowLocks); + enableInterrupts(); + final int finalLastIndexExclusive = miniBatchOp != null ? 
miniBatchOp.getLastIndexExclusive() : batchOp.size(); final boolean finalSuccess = success; @@ -6588,13 +6699,12 @@ protected RowLock getRowLockInternal(byte[] row, boolean readLock, final RowLock success = true; return result; } catch (InterruptedException ie) { - LOG.warn("Thread interrupted waiting for lock on row: {}, in region {}", rowKey, - getRegionInfo().getRegionNameAsString()); - InterruptedIOException iie = new InterruptedIOException(); - iie.initCause(ie); + if (LOG.isDebugEnabled()) { + LOG.debug("Thread interrupted waiting for lock on row: {}, in region {}", rowKey, + getRegionInfo().getRegionNameAsString()); + } TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock"); - Thread.currentThread().interrupt(); - throw iie; + throw throwOnInterrupt(ie); } catch (Error error) { // The maximum lock count for read lock is 64K (hardcoded), when this maximum count // is reached, it will throw out an Error. This Error needs to be caught so it can @@ -7286,6 +7396,10 @@ private boolean populateResult(List results, KeyValueHeap heap, // Scanning between column families and thus the scope is between cells LimitScope limitScope = LimitScope.BETWEEN_CELLS; do { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + // We want to maintain any progress that is made towards the limits while scanning across // different column families. To do this, we toggle the keep progress flag on during calls // to the StoreScanner to ensure that any progress made thus far is not wiped away. @@ -7384,6 +7498,10 @@ private boolean nextInternal(List results, ScannerContext scannerContext) } } + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + // Let's see what we have in the storeHeap. Cell current = this.storeHeap.peek(); @@ -7464,6 +7582,10 @@ private boolean nextInternal(List results, ScannerContext scannerContext) return true; } + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + Cell nextKv = this.storeHeap.peek(); shouldStop = shouldStop(nextKv); // save that the row was empty before filters applied to it. @@ -7623,6 +7745,9 @@ protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws Cell next; while ((next = this.storeHeap.peek()) != null && CellUtil.matchingRows(next, curRowCell)) { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); this.storeHeap.next(MOCKED_LIST); } resetFilters(); @@ -8288,6 +8413,11 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, // when it assigns the edit a sequencedid (A.K.A the mvcc write number). WriteEntry writeEntry = null; MemStoreSizing memstoreAccounting = new NonThreadSafeMemStoreSizing(); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + try { boolean success = false; try { @@ -8303,9 +8433,19 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, prevRowLock = rowLock; } } + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. Do it before we take the lock and disable interrupts for + // the WAL append. + checkInterrupt(); + // STEP 3. Region lock lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 
1 : acquiredRowLocks.size()); locked = true; + + // From this point until memstore update this operation should not be interrupted. + disableInterrupts(); + long now = EnvironmentEdgeManager.currentTime(); // STEP 4. Let the processor scan the rows, generate mutations and add waledits doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout); @@ -8371,6 +8511,8 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, } // release locks if some were acquired but another timed out releaseRowLocks(acquiredRowLocks); + + enableInterrupts(); } // 12. Run post-process hook @@ -8433,6 +8575,8 @@ public Void call() throws IOException { rowProcessorExecutor.execute(task); try { task.get(timeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException ie) { + throw throwOnInterrupt(ie); } catch (TimeoutException te) { String row = processor.getRowsToLock().isEmpty() ? "" : " on row(s):" + Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "..."; @@ -8528,11 +8672,6 @@ private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, Listcells using comparator */ @@ -8558,7 +8697,7 @@ private static List sort(List cells, final CellComparator comparator (2 * ClassSize.ATOMIC_BOOLEAN) + // closed, closing (3 * ClassSize.ATOMIC_LONG) + // numPutsWithoutWAL, dataInMemoryWithoutWAL, // compactionsFailed - (2 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints + (3 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints, regionLockHolders WriteState.HEAP_SIZE + // writestate ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + // stores (2 * ClassSize.REENTRANT_LOCK) + // lock, updatesLock @@ -8730,12 +8869,22 @@ public void startRegionOperation() throws IOException { @Override public void startRegionOperation(Operation op) throws IOException { + boolean isInterruptableOp = false; switch (op) { - case GET: // read operations + case GET: // interruptible read operations case SCAN: + isInterruptableOp = true; checkReadsEnabled(); break; - default: + case INCREMENT: // interruptible write operations + case APPEND: + case PUT: + case DELETE: + case BATCH_MUTATE: + case CHECK_AND_MUTATE: + isInterruptableOp = true; + break; + default: // all others break; } if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION @@ -8748,6 +8897,12 @@ public void startRegionOperation(Operation op) throws IOException { throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } lock(lock.readLock()); + // Update regionLockHolders ONLY for any startRegionOperation call that is invoked from + // an RPC handler + Thread thisThread = Thread.currentThread(); + if (isInterruptableOp) { + regionLockHolders.put(thisThread, true); + } if (this.closed.get()) { lock.readLock().unlock(); throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); @@ -8762,6 +8917,11 @@ public void startRegionOperation(Operation op) throws IOException { coprocessorHost.postStartRegionOperation(op); } } catch (Exception e) { + if (isInterruptableOp) { + // would be harmless to remove what we didn't add but we know by 'isInterruptableOp' + // if we added this thread to regionLockHolders + regionLockHolders.remove(thisThread); + } lock.readLock().unlock(); throw new IOException(e); } @@ -8777,6 +8937,8 @@ public void closeRegionOperation(Operation operation) throws IOException { if (operation == Operation.SNAPSHOT) { stores.values().forEach(HStore::postSnapshotOperation); } + 
Thread thisThread = Thread.currentThread(); + regionLockHolders.remove(thisThread); lock.readLock().unlock(); if (coprocessorHost != null) { coprocessorHost.postCloseRegionOperation(operation); @@ -8792,8 +8954,7 @@ public void closeRegionOperation(Operation operation) throws IOException { * @throws RegionTooBusyException if failed to get the lock in time * @throws InterruptedIOException if interrupted while waiting for a lock */ - private void startBulkRegionOperation(boolean writeLockNeeded) - throws NotServingRegionException, RegionTooBusyException, InterruptedIOException { + private void startBulkRegionOperation(boolean writeLockNeeded) throws IOException { if (this.closing.get()) { throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } @@ -8804,6 +8965,7 @@ private void startBulkRegionOperation(boolean writeLockNeeded) else lock.readLock().unlock(); throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); } + regionLockHolders.put(Thread.currentThread(), true); } /** @@ -8811,6 +8973,7 @@ private void startBulkRegionOperation(boolean writeLockNeeded) * to the try block of #startRegionOperation */ private void closeBulkRegionOperation(){ + regionLockHolders.remove(Thread.currentThread()); if (lock.writeLock().isHeldByCurrentThread()) lock.writeLock().unlock(); else lock.readLock().unlock(); } @@ -8841,7 +9004,7 @@ private void recordMutationWithoutWal(final Map<byte[], List<Cell>> familyMap) dataInMemoryWithoutWAL.add(mutationSize); } - private void lock(final Lock lock) throws RegionTooBusyException, InterruptedIOException { + private void lock(final Lock lock) throws IOException { lock(lock, 1); } @@ -8850,8 +9013,7 @@ private void lock(final Lock lock) throws RegionTooBusyException, InterruptedIOE * if failed to get the lock in time. Throw InterruptedIOException * if interrupted while waiting for the lock. */ - private void lock(final Lock lock, final int multiplier) - throws RegionTooBusyException, InterruptedIOException { + private void lock(final Lock lock, final int multiplier) throws IOException { try { final long waitTime = Math.min(maxBusyWaitDuration, busyWaitDuration * Math.min(multiplier, maxBusyWaitMultiplier)); @@ -8869,10 +9031,10 @@ private void lock(final Lock lock, final int multiplier) throw rtbe; } } catch (InterruptedException ie) { - LOG.info("Interrupted while waiting for a lock in region {}", this); - InterruptedIOException iie = new InterruptedIOException(); - iie.initCause(ie); - throw iie; + if (LOG.isDebugEnabled()) { + LOG.debug("Interrupted while waiting for a lock in region {}", this); + } + throw throwOnInterrupt(ie); } } @@ -9000,6 +9162,67 @@ public long getReadPoint() { return getReadPoint(IsolationLevel.READ_COMMITTED); } + /** + * If a handler thread is eligible for interrupt, make it ineligible. Should be paired + * with {@link #enableInterrupts()}. + */ + protected void disableInterrupts() { + regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> false); + } + + /** + * If a handler thread was made ineligible for interrupt via {@link #disableInterrupts()}, + * make it eligible again. No-op if interrupts are already enabled. + */ + protected void enableInterrupts() { + regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> true); + } + + /** + * Interrupt any region operations that have acquired the region lock via + * {@link #startRegionOperation(org.apache.hadoop.hbase.regionserver.Region.Operation)}, + * or {@link #startBulkRegionOperation(boolean)}.
+ */ + private void interruptRegionOperations() { + for (Map.Entry entry: regionLockHolders.entrySet()) { + // An entry in this map will have a boolean value indicating if it is currently + // eligible for interrupt; if so, we should interrupt it. + if (entry.getValue().booleanValue()) { + entry.getKey().interrupt(); + } + } + } + + /** + * Check thread interrupt status and throw an exception if interrupted. + * @throws NotServingRegionException if region is closing + * @throws InterruptedIOException if interrupted but region is not closing + */ + // Package scope for tests + void checkInterrupt() throws NotServingRegionException, InterruptedIOException { + if (Thread.interrupted()) { + if (this.closing.get()) { + throw new NotServingRegionException( + getRegionInfo().getRegionNameAsString() + " is closing"); + } + throw new InterruptedIOException(); + } + } + + /** + * Throw the correct exception upon interrupt + * @param t cause + */ + // Package scope for tests + IOException throwOnInterrupt(Throwable t) { + if (this.closing.get()) { + return (NotServingRegionException) new NotServingRegionException( + getRegionInfo().getRegionNameAsString() + " is closing") + .initCause(t); + } + return (InterruptedIOException) new InterruptedIOException().initCause(t); + } + /** * {@inheritDoc} */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 79df0013e087..900e5711415e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -199,7 +199,8 @@ public interface Region extends ConfigurationObserver { */ enum Operation { ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE, - REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT, COMPACT_SWITCH + REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT, COMPACT_SWITCH, + CHECK_AND_MUTATE } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index cb2e9e928931..29e888372908 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1979,14 +1979,15 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws /** * Create an HRegion that writes to the local tmp dirs with specified wal * @param info regioninfo + * @param conf configuration * @param desc table descriptor * @param wal wal for this region. * @return created hregion * @throws IOException */ - public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal) - throws IOException { - return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal); + public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc, + WAL wal) throws IOException { + return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal); } /** @@ -2000,14 +2001,15 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal * @throws IOException */ public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - boolean isReadOnly, Durability durability, WAL wal, byte[]... 
families) throws IOException { - return createLocalHRegionWithInMemoryFlags(tableName,startKey, stopKey, isReadOnly, + Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families) + throws IOException { + return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly, durability, wal, null, families); } public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey, - byte[] stopKey, boolean isReadOnly, Durability durability, WAL wal, boolean[] compactedMemStore, - byte[]... families) throws IOException { + byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, + boolean[] compactedMemStore, byte[]... families) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); builder.setReadOnly(isReadOnly); int i = 0; @@ -2027,7 +2029,7 @@ public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] s builder.setDurability(durability); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build(); - return createLocalHRegion(info, builder.build(), wal); + return createLocalHRegion(info, conf, builder.build(), wal); } // diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 3c9155371199..60ca5b3896b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -181,7 +181,7 @@ public void setUp() throws IOException { RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); walFactory = new WALFactory(conf, id); - region = TEST_UTIL.createLocalHRegion(info, htd, walFactory.getWAL(info)); + region = TEST_UTIL.createLocalHRegion(info, conf, htd, walFactory.getWAL(info)); region.setBlockCache(BlockCacheFactory.createBlockCache(conf)); store = new HStore(region, hcd, conf, false); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java index fdf96dab87fc..dab82144f04a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java @@ -203,7 +203,7 @@ public void testLockupAroundBadAssignSync() throws IOException { boolean threwOnAppend = false; boolean threwOnBoth = false; - HRegion region = initHRegion(tableName, null, null, dodgyWAL); + HRegion region = initHRegion(tableName, null, null, CONF, dodgyWAL); try { // Get some random bytes. byte[] value = Bytes.toBytes(getName()); @@ -316,11 +316,11 @@ public void testLockupAroundBadAssignSync() throws IOException { * @return A region on which you must call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. 
*/ - public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, WAL wal) - throws IOException { + public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + Configuration conf, WAL wal) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, false, Durability.SYNC_WAL, - wal, COLUMN_FAMILY_BYTES); + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, + Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index bbc73e3bda59..da3f2204ddd3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -30,6 +30,8 @@ import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -136,6 +138,7 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.regionserver.HRegion.MutationBatchOperation; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; +import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.Region.RowLock; import org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; @@ -354,7 +357,7 @@ public void sync(long txid) throws IOException { Path rootDir = new Path(dir + "testMemstoreSnapshotSize"); MyFaultyFSLog faultyLog = new MyFaultyFSLog(fs, rootDir, "testMemstoreSnapshotSize", CONF); faultyLog.init(); - region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, faultyLog, + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, faultyLog, COLUMN_FAMILY_BYTES); HStore store = region.getStore(COLUMN_FAMILY_BYTES); @@ -401,8 +404,8 @@ public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOExcep Path rootDir = new Path(dir + testName); FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF); hLog.init(); - region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, - COLUMN_FAMILY_BYTES); + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, hLog, + COLUMN_FAMILY_BYTES); HStore store = region.getStore(COLUMN_FAMILY_BYTES); assertEquals(0, region.getMemStoreDataSize()); @@ -500,7 +503,7 @@ public Object run() throws Exception { HRegion region = null; try { // Initialize region - region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal, + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); @@ -565,7 +568,7 @@ public Object run() throws Exception { HRegion region = null; try { // Initialize region - region = initHRegion(tableName, null, null, false, + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, wal, 
COLUMN_FAMILY_BYTES); long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); @@ -1055,7 +1058,7 @@ public void testFlushMarkers() throws Exception { final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build()); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family); + HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -1260,7 +1263,7 @@ public long getSyncedLength() { CommonFSUtils.getRootDir(walConf), method, walConf); wal.init(); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family); + HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); int i = 0; Put put = new Put(Bytes.toBytes(i)); put.setDurability(Durability.SKIP_WAL); // have to skip mocked wal @@ -1291,7 +1294,7 @@ public long getSyncedLength() { method, walConf); wal.init(); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family); + HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); region.put(put); // 3. Test case where ABORT_FLUSH will throw exception. // Even if ABORT_FLUSH throws exception, we should not fail with IOE, but continue with @@ -3240,7 +3243,7 @@ public void testDataInMemoryWithoutWAL() throws IOException { hLog.init(); // This chunk creation is done throughout the code base. Do we want to move it into core? // It is missing from this test. W/o it we NPE. - region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); Cell originalCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) @@ -3513,7 +3516,7 @@ public void testGetWithFilter() throws IOException, InterruptedException { RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log"); final WAL wal = HBaseTestingUtility.createWal(TEST_UTIL.getConfiguration(), logDir, info); - this.region = TEST_UTIL.createLocalHRegion(info, tableDescriptor, wal); + this.region = TEST_UTIL.createLocalHRegion(info, CONF, tableDescriptor, wal); // Put 4 version to memstore long ts = 0; @@ -5405,7 +5408,7 @@ private void durabilityTest(String method, Durability tableDurability, final WALFactory wals = new WALFactory(walConf, HBaseTestingUtility.getRandomUUID().toString()); final WAL wal = spy(wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build())); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, tableDurability, wal, + HConstants.EMPTY_END_ROW, CONF, false, tableDurability, wal, new byte[][] { family }); Put put = new Put(Bytes.toBytes("r1")); @@ -5772,7 +5775,7 @@ protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopK RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build(); final WAL wal = HBaseTestingUtility.createWal(conf, logDir, hri); - return initHRegion(tableName, startKey, stopKey, isReadOnly, Durability.SYNC_WAL, wal, + return initHRegion(tableName, startKey, stopKey, conf, isReadOnly, Durability.SYNC_WAL, wal, families); } @@ -5781,11 
+5784,12 @@ protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopK * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. */ public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException { + Configuration conf, boolean isReadOnly, Durability durability, WAL wal, + byte[]... families) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, - isReadOnly, durability, wal, families); + conf, isReadOnly, durability, wal, families); } /** @@ -6052,9 +6056,9 @@ public void testReverseScanner_smaller_blocksize() throws IOException { byte[] col1 = Bytes.toBytes("col1"); byte[] col2 = Bytes.toBytes("col2"); long ts = 1; - HBaseConfiguration config = new HBaseConfiguration(); - config.setInt("test.block.size", 1); - this.region = initHRegion(tableName, method, config, families); + Configuration conf = new Configuration(CONF); + conf.setInt("test.block.size", 1); + this.region = initHRegion(tableName, method, conf, families); KeyValue kv1 = new KeyValue(rowA, cf, col1, ts, KeyValue.Type.Put, null); KeyValue kv2 = new KeyValue(rowB, cf, col1, ts, KeyValue.Type.Put, null); KeyValue kv3 = new KeyValue(rowC, cf, col1, ts, KeyValue.Type.Put, null); @@ -6132,7 +6136,7 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() byte[][] families = { cf1, cf2, cf3 }; byte[] col = Bytes.toBytes("C"); long ts = 1; - HBaseConfiguration conf = new HBaseConfiguration(); + Configuration conf = new Configuration(CONF); // disable compactions in this test. conf.setInt("hbase.hstore.compactionThreshold", 10000); this.region = initHRegion(tableName, method, conf, families); @@ -6294,7 +6298,7 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() byte[][] families = { cf1, cf2, cf3, cf4 }; byte[] col = Bytes.toBytes("C"); long ts = 1; - HBaseConfiguration conf = new HBaseConfiguration(); + Configuration conf = new Configuration(CONF); // disable compactions in this test. 
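// Illustrative note (assumption based on the usual semantics of
// hbase.hstore.compactionThreshold): a threshold of 10000 store files is never
// reached in these tests, which is what makes this setting an effective "off" switch.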
conf.setInt("hbase.hstore.compactionThreshold", 10000); this.region = initHRegion(tableName, method, conf, families); @@ -6360,7 +6364,7 @@ public void testReverseScanner_StackOverflow() throws IOException { byte[] cf1 = Bytes.toBytes("CF1"); byte[][] families = {cf1}; byte[] col = Bytes.toBytes("C"); - HBaseConfiguration conf = new HBaseConfiguration(); + Configuration conf = new Configuration(CONF); this.region = initHRegion(tableName, method, conf, families); // setup with one storefile and one memstore, to create scanner and get an earlier readPt Put put = new Put(Bytes.toBytes("19998")); @@ -6409,8 +6413,7 @@ public void testReverseScanShouldNotScanMemstoreIfReadPtLesser() throws Exceptio byte[] cf1 = Bytes.toBytes("CF1"); byte[][] families = { cf1 }; byte[] col = Bytes.toBytes("C"); - HBaseConfiguration conf = new HBaseConfiguration(); - this.region = initHRegion(tableName, method, conf, families); + this.region = initHRegion(tableName, method, CONF, families); // setup with one storefile and one memstore, to create scanner and get an earlier readPt Put put = new Put(Bytes.toBytes("19996")); put.addColumn(cf1, col, Bytes.toBytes("val")); @@ -6462,8 +6465,7 @@ public void testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception { byte[][] families = { cf1 }; byte[] col = Bytes.toBytes("C"); - HBaseConfiguration conf = new HBaseConfiguration(); - this.region = initHRegion(tableName, method, conf, families); + this.region = initHRegion(tableName, method, CONF, families); Put put = new Put(Bytes.toBytes("199996")); put.addColumn(cf1, col, Bytes.toBytes("val")); @@ -7364,4 +7366,226 @@ protected List doCompaction(CompactionRequestImpl cr, return super.doCompaction(cr, filesToCompact, user, compactionStartTime, newFiles); } } + + @Test + public void testCloseNoInterrupt() throws Exception { + byte[] cf1 = Bytes.toBytes("CF1"); + byte[][] families = { cf1 }; + final int SLEEP_TIME = 10 * 1000; + + Configuration conf = new Configuration(CONF); + // Disable close thread interrupt and server abort behavior + conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, false); + conf.setInt(HRegion.CLOSE_WAIT_INTERVAL, 1000); + region = initHRegion(tableName, method, conf, families); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean holderInterrupted = new AtomicBoolean(); + Thread holder = new Thread(new Runnable() { + @Override + public void run() { + try { + LOG.info("Starting region operation holder"); + region.startRegionOperation(Operation.SCAN); + latch.countDown(); + try { + Thread.sleep(SLEEP_TIME); + } catch (InterruptedException e) { + LOG.info("Interrupted"); + holderInterrupted.set(true); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + region.closeRegionOperation(); + } catch (IOException e) { + } + LOG.info("Stopped region operation holder"); + } + } + }); + + holder.start(); + latch.await(); + region.close(); + region = null; + holder.join(); + + assertFalse("Region lock holder should not have been interrupted", holderInterrupted.get()); + } + + @Test + public void testCloseInterrupt() throws Exception { + byte[] cf1 = Bytes.toBytes("CF1"); + byte[][] families = { cf1 }; + final int SLEEP_TIME = 10 * 1000; + + Configuration conf = new Configuration(CONF); + // Enable close thread interrupt and server abort behavior + conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); + // Speed up the unit test, no need to wait default 10 seconds. 
+ conf.setInt(HRegion.CLOSE_WAIT_INTERVAL, 1000); + region = initHRegion(tableName, method, conf, families); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean holderInterrupted = new AtomicBoolean(); + Thread holder = new Thread(new Runnable() { + @Override + public void run() { + try { + LOG.info("Starting region operation holder"); + region.startRegionOperation(Operation.SCAN); + latch.countDown(); + try { + Thread.sleep(SLEEP_TIME); + } catch (InterruptedException e) { + LOG.info("Interrupted"); + holderInterrupted.set(true); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + region.closeRegionOperation(); + } catch (IOException e) { + } + LOG.info("Stopped region operation holder"); + } + } + }); + + holder.start(); + latch.await(); + region.close(); + region = null; + holder.join(); + + assertTrue("Region lock holder was not interrupted", holderInterrupted.get()); + } + + @Test + public void testCloseAbort() throws Exception { + byte[] cf1 = Bytes.toBytes("CF1"); + byte[][] families = { cf1 }; + final int SLEEP_TIME = 10 * 1000; + + Configuration conf = new Configuration(CONF); + // Enable close thread interrupt and server abort behavior. + conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); + // Set the abort interval to a fraction of sleep time so we are guaranteed to be aborted. + conf.setInt(HRegion.CLOSE_WAIT_TIME, SLEEP_TIME / 2); + // Set the wait interval to a fraction of sleep time so we are guaranteed to be interrupted. + conf.setInt(HRegion.CLOSE_WAIT_INTERVAL, SLEEP_TIME / 4); + region = initHRegion(tableName, method, conf, families); + RegionServerServices rsServices = mock(RegionServerServices.class); + when(rsServices.getServerName()).thenReturn(ServerName.valueOf("localhost", 1000, 1000)); + region.rsServices = rsServices; + + final CountDownLatch latch = new CountDownLatch(1); + Thread holder = new Thread(new Runnable() { + @Override + public void run() { + try { + LOG.info("Starting region operation holder"); + region.startRegionOperation(Operation.SCAN); + latch.countDown(); + // Hold the lock for SLEEP_TIME seconds no matter how many times we are interrupted. + int timeRemaining = SLEEP_TIME; + while (timeRemaining > 0) { + long start = EnvironmentEdgeManager.currentTime(); + try { + Thread.sleep(timeRemaining); + } catch (InterruptedException e) { + LOG.info("Interrupted"); + } + long end = EnvironmentEdgeManager.currentTime(); + timeRemaining -= end - start; + if (timeRemaining < 0) { + timeRemaining = 0; + } + if (timeRemaining > 0) { + LOG.info("Sleeping again, remaining time " + timeRemaining + " ms"); + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + region.closeRegionOperation(); + } catch (IOException e) { + } + LOG.info("Stopped region operation holder"); + } + } + }); + + holder.start(); + latch.await(); + try { + region.close(); + } catch (IOException e) { + LOG.info("Caught expected exception", e); + } + region = null; + holder.join(); + + // Verify the region tried to abort the server + verify(rsServices, atLeast(1)).abort(anyString(),any()); + } + + @Test + public void testInterruptProtection() throws Exception { + byte[] cf1 = Bytes.toBytes("CF1"); + byte[][] families = { cf1 }; + final int SLEEP_TIME = 10 * 1000; + + Configuration conf = new Configuration(CONF); + // Enable close thread interrupt and server abort behavior. 
+ conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); + conf.setInt(HRegion.CLOSE_WAIT_INTERVAL, 1000); + region = initHRegion(tableName, method, conf, families); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean holderInterrupted = new AtomicBoolean(); + Thread holder = new Thread(new Runnable() { + @Override + public void run() { + try { + LOG.info("Starting region operation holder"); + region.startRegionOperation(Operation.SCAN); + LOG.info("Protecting against interrupts"); + region.disableInterrupts(); + try { + latch.countDown(); + try { + Thread.sleep(SLEEP_TIME); + } catch (InterruptedException e) { + LOG.info("Interrupted"); + holderInterrupted.set(true); + } + } finally { + region.enableInterrupts(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + region.closeRegionOperation(); + } catch (IOException e) { + } + LOG.info("Stopped region operation holder"); + } + } + }); + + holder.start(); + latch.await(); + region.close(); + region = null; + holder.join(); + + assertFalse("Region lock holder should not have been interrupted", holderInterrupted.get()); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index a8c12052bb16..9ecdc455f5f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -1709,6 +1709,6 @@ private void putDataByReplay(HRegion region, private static HRegion initHRegion(byte[] tableName, byte[]... families) throws IOException { return TEST_UTIL.createLocalHRegion(TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, Durability.SYNC_WAL, null, families); + HConstants.EMPTY_END_ROW, CONF, false, Durability.SYNC_WAL, null, families); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java index 59a0741721ba..e64994aa310b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -51,7 +53,8 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion { */ @Override public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException { + Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... 
families) + throws IOException { boolean[] inMemory = new boolean[families.length]; for(int i = 0; i < inMemory.length; i++) { inMemory[i] = true; @@ -59,7 +62,7 @@ public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, - isReadOnly, durability, wal, inMemory, families); + conf, isReadOnly, durability, wal, inMemory, families); } @Override int getTestCountForTestWritesWhileScanning() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java index 710042e9c276..4792869b2f90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java @@ -87,7 +87,7 @@ private HRegion getRegion(final Configuration conf, final String tableName) thro ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegion(TableName.valueOf(tableName), HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, false, Durability.SKIP_WAL, wal, INCREMENT_BYTES); + HConstants.EMPTY_BYTE_ARRAY, conf, false, Durability.SKIP_WAL, wal, INCREMENT_BYTES); } private void closeRegion(final HRegion region) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java new file mode 100644 index 000000000000..10fa0b9af755 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java @@ -0,0 +1,363 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.filter.FilterBase; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALEdit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({RegionServerTests.class, LargeTests.class}) +public class TestRegionInterrupt { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionInterrupt.class); + + private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final Logger LOG = LoggerFactory.getLogger(TestRegionInterrupt.class); + + static final byte[] FAMILY = Bytes.toBytes("info"); + + static long sleepTime; + + @Rule + public TableNameTestRule name = new TableNameTestRule(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + conf.setClass(HConstants.REGION_IMPL, InterruptInterceptingHRegion.class, Region.class); + conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); + // Ensure the sleep interval is long enough for interrupts to occur. 
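// Illustrative note (derived from values in this patch): with the default close wait
// interval of 10 seconds, sleepTime works out to 20 seconds, long enough to span at
// least one interrupt attempt, and CLOSE_WAIT_TIME is then 2 * sleepTime (40 seconds)
// so a close that never acquires the lock still ends the test in bounded time.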
+ long waitInterval = conf.getLong(HRegion.CLOSE_WAIT_INTERVAL, + HRegion.DEFAULT_CLOSE_WAIT_INTERVAL); + sleepTime = waitInterval * 2; + // Try to bound the running time of this unit if expected actions do not take place. + conf.setLong(HRegion.CLOSE_WAIT_TIME, sleepTime * 2); + } + + @Before + public void setUp() throws Exception { + TEST_UTIL.startMiniCluster(); + } + + @After + public void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testCloseInterruptScanning() throws Exception { + final TableName tableName = name.getTableName(); + LOG.info("Creating table " + tableName); + try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { + // load some data + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + TEST_UTIL.loadTable(table, FAMILY); + final AtomicBoolean expectedExceptionCaught = new AtomicBoolean(false); + // scan the table in the background + Thread scanner = new Thread(new Runnable() { + @Override + public void run() { + Scan scan = new Scan(); + scan.addFamily(FAMILY); + scan.setFilter(new DelayingFilter()); + try { + LOG.info("Starting scan"); + try (ResultScanner rs = table.getScanner(scan)) { + Result r; + do { + r = rs.next(); + if (r != null) { + LOG.info("Scanned row " + Bytes.toStringBinary(r.getRow())); + } + } while (r != null); + } + } catch (IOException e) { + LOG.info("Scanner caught exception", e); + expectedExceptionCaught.set(true); + } finally { + LOG.info("Finished scan"); + } + } + }); + scanner.start(); + + // Wait for the filter to begin sleeping + LOG.info("Waiting for scanner to start"); + Waiter.waitFor(TEST_UTIL.getConfiguration(), 10*1000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return DelayingFilter.isSleeping(); + } + }); + + // Offline the table, this will trigger closing + LOG.info("Offlining table " + tableName); + TEST_UTIL.getAdmin().disableTable(tableName); + + // Wait for scanner termination + scanner.join(); + + // When we get here the region has closed and the table is offline + assertTrue("Region operations were not interrupted", + InterruptInterceptingHRegion.wasInterrupted()); + assertTrue("Scanner did not catch expected exception", expectedExceptionCaught.get()); + } + } + + @Test + public void testCloseInterruptMutation() throws Exception { + final TableName tableName = name.getTableName(); + final Admin admin = TEST_UTIL.getAdmin(); + // Create the test table + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(MutationDelayingCoprocessor.class.getName()) + .build(); + LOG.info("Creating table " + tableName); + admin.createTable(htd); + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + + // Insert some data in the background + LOG.info("Starting writes to table " + tableName); + final int NUM_ROWS = 100; + final AtomicBoolean expectedExceptionCaught = new AtomicBoolean(false); + Thread inserter = new Thread(new Runnable() { + @Override + public void run() { + try (BufferedMutator t = admin.getConnection().getBufferedMutator(tableName)) { + for (int i = 0; i < NUM_ROWS; i++) { + LOG.info("Writing row " + i + " to " + tableName); + byte[] value = new byte[10], row = Bytes.toBytes(Integer.toString(i)); + Bytes.random(value); + t.mutate(new Put(row).addColumn(FAMILY, HConstants.EMPTY_BYTE_ARRAY, value)); + t.flush(); + } + } catch (IOException e) { + LOG.info("Inserter caught exception", e); + expectedExceptionCaught.set(true); + } + } + }); + 
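      // Editorial sketch, not part of the patch: the inserter above is expected to fail once
      // the region starts closing. HRegion converts the thread interrupt into an
      // InterruptedIOException (via throwOnInterrupt, overridden below), which BufferedMutator
      // surfaces as an IOException, flipping expectedExceptionCaught. A minimal sketch of that
      // conversion, assuming the convention rather than quoting HRegion's actual body:
      //   IOException throwOnInterrupt(Throwable t) {
      //     return (IOException) new InterruptedIOException("Interrupted").initCause(t);
      //   }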
inserter.start(); + + // Wait for delayed insertion to begin + LOG.info("Waiting for mutations to start"); + Waiter.waitFor(TEST_UTIL.getConfiguration(), 10*1000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return MutationDelayingCoprocessor.isSleeping(); + } + }); + + // Offline the table, this will trigger closing + LOG.info("Offlining table " + tableName); + admin.disableTable(tableName); + + // Wait for the inserter to finish + inserter.join(); + + // When we get here the region has closed and the table is offline + assertTrue("Region operations were not interrupted", + InterruptInterceptingHRegion.wasInterrupted()); + assertTrue("Inserter did not catch expected exception", expectedExceptionCaught.get()); + + } + + public static class InterruptInterceptingHRegion extends HRegion { + + private static boolean interrupted = false; + + public static boolean wasInterrupted() { + return interrupted; + } + + public InterruptInterceptingHRegion(Path tableDir, WAL wal, FileSystem fs, + Configuration conf, RegionInfo regionInfo, TableDescriptor htd, + RegionServerServices rsServices) { + super(tableDir, wal, fs, conf, regionInfo, htd, rsServices); + } + + public InterruptInterceptingHRegion(HRegionFileSystem fs, WAL wal, Configuration conf, + TableDescriptor htd, RegionServerServices rsServices) { + super(fs, wal, conf, htd, rsServices); + } + + @Override + void checkInterrupt() throws NotServingRegionException, InterruptedIOException { + try { + super.checkInterrupt(); + } catch (NotServingRegionException | InterruptedIOException e) { + interrupted = true; + throw e; + } + } + + @Override + IOException throwOnInterrupt(Throwable t) { + interrupted = true; + return super.throwOnInterrupt(t); + } + + } + + public static class DelayingFilter extends FilterBase { + + static volatile boolean sleeping = false; + + public static boolean isSleeping() { + return sleeping; + } + + @Override + public ReturnCode filterCell(Cell v) throws IOException { + LOG.info("Starting sleep on " + v); + sleeping = true; + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + // restore interrupt status so region scanner can handle it as expected + Thread.currentThread().interrupt(); + LOG.info("Interrupted during sleep on " + v); + } finally { + LOG.info("Done sleep on " + v); + sleeping = false; + } + return ReturnCode.INCLUDE; + } + + public static DelayingFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + // Just return a new instance. + return new DelayingFilter(); + } + + } + + public static class MutationDelayingCoprocessor implements RegionCoprocessor, RegionObserver { + + static volatile boolean sleeping = false; + + public static boolean isSleeping() { + return sleeping; + } + + private void doSleep(Region.Operation op) { + LOG.info("Starting sleep for " + op); + sleeping = true; + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + // restore interrupt status so doMiniBatchMutation etc. 
can handle it as expected + Thread.currentThread().interrupt(); + LOG.info("Interrupted during " + op); + } finally { + LOG.info("Done"); + sleeping = false; + } + } + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void prePut(ObserverContext c, Put put, WALEdit edit, + Durability durability) throws IOException { + doSleep(Region.Operation.PUT); + RegionObserver.super.prePut(c, put, edit, durability); + } + + @Override + public void preDelete(ObserverContext c, Delete delete, + WALEdit edit, Durability durability) throws IOException { + doSleep(Region.Operation.DELETE); + RegionObserver.super.preDelete(c, delete, edit, durability); + } + + @Override + public Result preAppend(ObserverContext c, Append append) + throws IOException { + doSleep(Region.Operation.APPEND); + return RegionObserver.super.preAppend(c, append); + } + + @Override + public Result preIncrement(ObserverContext c, Increment increment) + throws IOException { + doSleep(Region.Operation.INCREMENT); + return RegionObserver.super.preIncrement(c, increment); + } + + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index ce7919e36eec..e850853b60e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -226,7 +226,7 @@ public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException { // There is no 'stop' once a logRoller is running.. it just dies. logRoller.start(); // Now get a region and start adding in edits. - final HRegion region = initHRegion(tableName, null, null, dodgyWAL); + final HRegion region = initHRegion(tableName, null, null, CONF, dodgyWAL); byte [] bytes = Bytes.toBytes(getName()); NavigableMap scopes = new TreeMap<>( Bytes.BYTES_COMPARATOR); @@ -557,11 +557,11 @@ public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} * when done. 
*/ - private static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, WAL wal) - throws IOException { + private static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + Configuration conf, WAL wal) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, false, Durability.SYNC_WAL, - wal, COLUMN_FAMILY_BYTES); + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, + Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index bdc516ce724b..8a82848f3658 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -563,7 +563,7 @@ private HRegion createHoldingHRegion(Configuration conf, TableDescriptor htd, WA RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - TEST_UTIL.createLocalHRegion(hri, htd, wal).close(); + TEST_UTIL.createLocalHRegion(hri, CONF, htd, wal).close(); RegionServerServices rsServices = mock(RegionServerServices.class); when(rsServices.getServerName()).thenReturn(ServerName.valueOf("localhost:12345", 123456)); when(rsServices.getConfiguration()).thenReturn(conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java index a655bdaf7c14..e763896d8df7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java @@ -168,7 +168,7 @@ public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - final HRegion region = TEST_UTIL.createLocalHRegion(hri, htd, log); + final HRegion region = TEST_UTIL.createLocalHRegion(hri, CONF, htd, log); ExecutorService exec = Executors.newFixedThreadPool(2); // do a regular write first because of memstore size calculation. 
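All of the initHRegion changes in this series follow the same shape: the test's own Configuration is threaded through to createLocalHRegion instead of letting the helper fall back to a shared default. A minimal usage sketch of why that matters (hypothetical test body; only the helper signature and the HRegion.WAL_HSYNC_CONF_KEY override come from the surrounding diffs):

    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.set(HRegion.WAL_HSYNC_CONF_KEY, "true"); // per-test override
    WAL wal = getWAL(fs, rootDir, getName(), conf);
    HRegion region = initHRegion(tableName, null, null, conf, wal);
    // The region observes the override without mutating the global configuration
    // shared by the other tests running in the same JVM.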
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java index 2dd948c290df..0daeb13b16ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java @@ -89,7 +89,7 @@ public void testWALDurability() throws IOException { FileSystem fs = FileSystem.get(conf); Path rootDir = new Path(dir + getName()); T wal = getWAL(fs, rootDir, getName(), conf); - HRegion region = initHRegion(tableName, null, null, wal); + HRegion region = initHRegion(tableName, null, null, conf, wal); try { resetSyncFlag(wal); assertNull(getSyncFlag(wal)); @@ -114,7 +114,7 @@ public void testWALDurability() throws IOException { conf.set(HRegion.WAL_HSYNC_CONF_KEY, "true"); fs = FileSystem.get(conf); wal = getWAL(fs, rootDir, getName(), conf); - region = initHRegion(tableName, null, null, wal); + region = initHRegion(tableName, null, null, conf, wal); try { resetSyncFlag(wal); @@ -156,11 +156,11 @@ private String getName() { * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} * when done. */ - public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, WAL wal) - throws IOException { + public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + Configuration conf, WAL wal) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, false, Durability.USE_DEFAULT, - wal, COLUMN_FAMILY_BYTES); + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, + Durability.USE_DEFAULT, wal, COLUMN_FAMILY_BYTES); } } From f37cd05c32ee878cca893adc7c8608ddbfdbd5e3 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 4 Nov 2020 11:02:28 +0800 Subject: [PATCH 166/769] HBASE-25235 Cleanup the deprecated methods in TimeRange (#2616) Signed-off-by: Jan Hentschel Signed-off-by: stack --- .../apache/hadoop/hbase/client/Append.java | 5 +- .../org/apache/hadoop/hbase/client/Get.java | 10 +-- .../apache/hadoop/hbase/client/Increment.java | 5 +- .../org/apache/hadoop/hbase/client/Query.java | 2 +- .../org/apache/hadoop/hbase/client/Scan.java | 4 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 16 +++- .../org/apache/hadoop/hbase/io/TimeRange.java | 86 ++----------------- .../hbase/regionserver/TimeRangeTracker.java | 2 +- .../coprocessor/TestAppendTimeRange.java | 8 +- .../coprocessor/TestIncrementTimeRange.java | 4 +- .../TestSimpleTimeRangeTracker.java | 14 +-- 11 files changed, 43 insertions(+), 113 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index 922f46703eb5..41b3845fc784 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -68,7 +68,7 @@ public class Append extends Mutation { * @return this */ public Append setTimeRange(long minStamp, long maxStamp) { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -162,10 +162,9 @@ public Append addColumn(byte[] family, byte[] qualifier, byte[] value) { 
/** * Add column and value to this Append operation. - * @param cell * @return This instance */ - @SuppressWarnings("unchecked") + @Override public Append add(final Cell cell) { try { super.add(cell); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index d3b57fb461cf..0f04407ac3e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -74,7 +74,6 @@ public class Get extends Query implements Row { private int storeOffset = 0; private TimeRange tr = TimeRange.allTime(); private boolean checkExistenceOnly = false; - private boolean closestRowBefore = false; private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** @@ -199,11 +198,10 @@ public Get addColumn(byte [] family, byte [] qualifier) { * [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive - * @throws IOException * @return this for invocation chaining */ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -214,17 +212,17 @@ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { */ public Get setTimestamp(long timestamp) { try { - tr = new TimeRange(timestamp, timestamp + 1); + tr = TimeRange.at(timestamp); } catch(Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); throw e; } - return this; } - @Override public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { + @Override + public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { return (Get) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index df448eb91b6a..bd824d4a855f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -148,9 +148,8 @@ public TimeRange getTimeRange() { * @throws IOException if invalid time range * @return this */ - public Increment setTimeRange(long minStamp, long maxStamp) - throws IOException { - tr = new TimeRange(minStamp, maxStamp); + public Increment setTimeRange(long minStamp, long maxStamp) throws IOException { + tr = TimeRange.between(minStamp, maxStamp); return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 1d990d1bc942..919513ceb622 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -230,7 +230,7 @@ public boolean doLoadColumnFamiliesOnDemand() { */ public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { - colFamTimeRangeMap.put(cf, new TimeRange(minStamp, maxStamp)); + colFamTimeRangeMap.put(cf, TimeRange.between(minStamp, maxStamp)); return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index d515c550f0e9..36b116bd90af 
100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -316,7 +316,7 @@ public Scan addColumn(byte [] family, byte [] qualifier) { * @return this */ public Scan setTimeRange(long minStamp, long maxStamp) throws IOException { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -350,7 +350,7 @@ public Scan setTimeStamp(long timestamp) */ public Scan setTimestamp(long timestamp) { try { - tr = new TimeRange(timestamp, timestamp + 1); + tr = TimeRange.at(timestamp); } catch(Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d5fdb89302c5..772183251634 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2861,10 +2861,18 @@ public static List toSecurityCapabilityList( } public static TimeRange toTimeRange(HBaseProtos.TimeRange timeRange) { - return timeRange == null ? - TimeRange.allTime() : - new TimeRange(timeRange.hasFrom() ? timeRange.getFrom() : 0, - timeRange.hasTo() ? timeRange.getTo() : Long.MAX_VALUE); + if (timeRange == null) { + return TimeRange.allTime(); + } + if (timeRange.hasFrom()) { + if (timeRange.hasTo()) { + return TimeRange.between(timeRange.getFrom(), timeRange.getTo()); + } else { + return TimeRange.from(timeRange.getFrom()); + } + } else { + return TimeRange.until(timeRange.getTo()); + } } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index fe229b692109..0dea94801b8a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -18,24 +18,23 @@ package org.apache.hadoop.hbase.io; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** * Represents an interval of version timestamps. Presumes timestamps between * {@link #INITIAL_MIN_TIMESTAMP} and {@link #INITIAL_MAX_TIMESTAMP} only. Gets freaked out if * passed a timestamp that is < {@link #INITIAL_MIN_TIMESTAMP}, - *
<p> + * <p/>
    * Evaluated according to minStamp <= timestamp < maxStamp or [minStamp,maxStamp) in interval * notation. - *
<p> + * <p/>
    * Can be returned and read by clients. Should not be directly created by clients. Thus, all * constructors are purposely @InterfaceAudience.Private. - *
<p> + * <p/>
    * Immutable. Thread-safe. */ @InterfaceAudience.Public -public class TimeRange { +public final class TimeRange { public static final long INITIAL_MIN_TIMESTAMP = 0L; public static final long INITIAL_MAX_TIMESTAMP = Long.MAX_VALUE; private static final TimeRange ALL_TIME = new TimeRange(INITIAL_MIN_TIMESTAMP, @@ -84,67 +83,13 @@ public static TimeRange between(long minStamp, long maxStamp) { private final long maxStamp; private final boolean allTime; - /** - * Default constructor. - * Represents interval [0, Long.MAX_VALUE) (allTime) - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange() { - this(INITIAL_MIN_TIMESTAMP, INITIAL_MAX_TIMESTAMP); - } - - /** - * Represents interval [minStamp, Long.MAX_VALUE) - * @param minStamp the minimum timestamp value, inclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(long minStamp) { - this(minStamp, INITIAL_MAX_TIMESTAMP); - } - - /** - * Represents interval [minStamp, Long.MAX_VALUE) - * @param minStamp the minimum timestamp value, inclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(byte [] minStamp) { - this(Bytes.toLong(minStamp)); - } - - /** - * Represents interval [minStamp, maxStamp) - * @param minStamp the minimum timestamp, inclusive - * @param maxStamp the maximum timestamp, exclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(byte [] minStamp, byte [] maxStamp) { - this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp)); - } - /** * Represents interval [minStamp, maxStamp) * @param minStamp the minimum timestamp, inclusive * @param maxStamp the maximum timestamp, exclusive * @throws IllegalArgumentException if either <0, - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(long minStamp, long maxStamp) { - check(minStamp, maxStamp); + private TimeRange(long minStamp, long maxStamp) { this.minStamp = minStamp; this.maxStamp = maxStamp; this.allTime = isAllTime(minStamp, maxStamp); @@ -188,27 +133,8 @@ public boolean isAllTime() { /** * Check if the specified timestamp is within this TimeRange. - *
<p> + * <p/>
    * Returns true if within interval [minStamp, maxStamp), false if not. - * @param bytes timestamp to check - * @param offset offset into the bytes - * @return true if within TimeRange, false if not - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. Use {@link #withinTimeRange(long)} instead - */ - @Deprecated - public boolean withinTimeRange(byte [] bytes, int offset) { - if (allTime) { - return true; - } - return withinTimeRange(Bytes.toLong(bytes, offset)); - } - - /** - * Check if the specified timestamp is within this TimeRange. - *
<p>
    - * Returns true if within interval [minStamp, maxStamp), false - * if not. * @param timestamp timestamp to check * @return true if within TimeRange, false if not */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index 18175648f305..37923ad4d464 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -240,7 +240,7 @@ TimeRange toTimeRange() { if (max == INITIAL_MAX_TIMESTAMP) { max = TimeRange.INITIAL_MAX_TIMESTAMP; } - return new TimeRange(min, max); + return TimeRange.between(min, max); } @VisibleForTesting diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java index 51f0d7307c02..8dfc774300ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java @@ -128,15 +128,15 @@ public void testHTableInterfaceMethods() throws Exception { time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range10 = new TimeRange(1, time + 10); - Result r = table.append(new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b")) - .setTimeRange(range10.getMin(), range10.getMax())); + TimeRange range10 = TimeRange.between(1, time + 10); + table.append(new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b")) + .setTimeRange(range10.getMin(), range10.getMax())); checkRowValue(table, ROW, Bytes.toBytes("ab")); assertEquals(MyObserver.tr10.getMin(), range10.getMin()); assertEquals(MyObserver.tr10.getMax(), range10.getMax()); time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range2 = new TimeRange(1, time+20); + TimeRange range2 = TimeRange.between(1, time + 20); List actions = Arrays.asList(new Row[] { new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c")) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java index a74914f62135..1e822e4d2164 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java @@ -165,7 +165,7 @@ private void checkHTableInterfaceMethods() throws Exception { time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range10 = new TimeRange(1, time+10); + TimeRange range10 = TimeRange.between(1, time+10); hTableInterface.increment(new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 10L) .setTimeRange(range10.getMin(), range10.getMax())); checkRowValue(ROW_A, Bytes.toBytes(11L)); @@ -174,7 +174,7 @@ private void checkHTableInterfaceMethods() throws Exception { time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range2 = new TimeRange(1, time+20); + TimeRange range2 = TimeRange.between(1, time + 20); List actions = Arrays.asList(new Row[] { new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L) .setTimeRange(range2.getMin(), range2.getMax()), diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java index 2858419f0378..2a3f018562cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java @@ -61,7 +61,7 @@ public void testExtreme() { @Test public void testTimeRangeInitialized() { TimeRangeTracker src = getTimeRangeTracker(); - TimeRange tr = new TimeRange(System.currentTimeMillis()); + TimeRange tr = TimeRange.from(System.currentTimeMillis()); assertFalse(src.includesTimeRange(tr)); } @@ -108,7 +108,7 @@ public void testSimpleInRange() { TimeRangeTracker trr = getTimeRangeTracker(); trr.includeTimestamp(0); trr.includeTimestamp(2); - assertTrue(trr.includesTimeRange(new TimeRange(1))); + assertTrue(trr.includesTimeRange(TimeRange.from(1))); } @Test @@ -118,27 +118,27 @@ public void testRangeConstruction() throws IOException { assertEquals(Long.MAX_VALUE, defaultRange.getMax()); assertTrue(defaultRange.isAllTime()); - TimeRange oneArgRange = new TimeRange(0L); + TimeRange oneArgRange = TimeRange.from(0L); assertEquals(0L, oneArgRange.getMin()); assertEquals(Long.MAX_VALUE, oneArgRange.getMax()); assertTrue(oneArgRange.isAllTime()); - TimeRange oneArgRange2 = new TimeRange(1); + TimeRange oneArgRange2 = TimeRange.from(1); assertEquals(1, oneArgRange2.getMin()); assertEquals(Long.MAX_VALUE, oneArgRange2.getMax()); assertFalse(oneArgRange2.isAllTime()); - TimeRange twoArgRange = new TimeRange(0L, Long.MAX_VALUE); + TimeRange twoArgRange = TimeRange.between(0L, Long.MAX_VALUE); assertEquals(0L, twoArgRange.getMin()); assertEquals(Long.MAX_VALUE, twoArgRange.getMax()); assertTrue(twoArgRange.isAllTime()); - TimeRange twoArgRange2 = new TimeRange(0L, Long.MAX_VALUE - 1); + TimeRange twoArgRange2 = TimeRange.between(0L, Long.MAX_VALUE - 1); assertEquals(0L, twoArgRange2.getMin()); assertEquals(Long.MAX_VALUE - 1, twoArgRange2.getMax()); assertFalse(twoArgRange2.isAllTime()); - TimeRange twoArgRange3 = new TimeRange(1, Long.MAX_VALUE); + TimeRange twoArgRange3 = TimeRange.between(1, Long.MAX_VALUE); assertEquals(1, twoArgRange3.getMin()); assertEquals(Long.MAX_VALUE, twoArgRange3.getMax()); assertFalse(twoArgRange3.isAllTime()); From 49774c7e18f4a4cf4cccd1617014d8345fb1e8e2 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 4 Nov 2020 17:54:18 +0800 Subject: [PATCH 167/769] HBASE-25216 The client zk syncer should deal with meta replica count change (#2614) Signed-off-by: Yu Li --- .../apache/hadoop/hbase/master/HMaster.java | 8 +- .../hadoop/hbase/master/MasterServices.java | 8 + .../procedure/ModifyTableProcedure.java | 7 + .../hbase/master/zksyncer/ClientZKSyncer.java | 216 +++++++++++++----- .../master/zksyncer/MasterAddressSyncer.java | 13 +- .../master/zksyncer/MetaLocationSyncer.java | 24 +- .../client/TestSeparateClientZKCluster.java | 62 +++-- .../hbase/master/MockNoopMasterServices.java | 6 + .../hbase/master/TestMasterNoCluster.java | 2 +- 9 files changed, 244 insertions(+), 102 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index f9123046eef2..8cb399a476e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ 
-322,8 +322,9 @@ public void run() { // Tracker for load balancer state LoadBalancerTracker loadBalancerTracker; // Tracker for meta location, if any client ZK quorum specified - MetaLocationSyncer metaLocationSyncer; + private MetaLocationSyncer metaLocationSyncer; // Tracker for active master location, if any client ZK quorum specified + @VisibleForTesting MasterAddressSyncer masterAddressSyncer; // Tracker for auto snapshot cleanup state SnapshotCleanupTracker snapshotCleanupTracker; @@ -3852,4 +3853,9 @@ public CompactionState getCompactionState(final TableName tableName) { } return compactionState; } + + @Override + public MetaLocationSyncer getMetaLocationSyncer() { + return metaLocationSyncer; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 384785d738f6..c5f0f3c4bcad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.LockedResource; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -570,4 +571,11 @@ default SplitWALManager getSplitWALManager(){ */ boolean normalizeRegions( final NormalizeTableFilterParams ntfp, final boolean isHighPriority) throws IOException; + + /** + * Get the meta location syncer. + *
<p/>
    + * We need to get this in MTP to tell the syncer the new meta replica count. + */ + MetaLocationSyncer getMetaLocationSyncer(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index beb129b6f52b..247dd9c202f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; @@ -157,6 +158,12 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS break; case MODIFY_TABLE_ASSIGN_NEW_REPLICAS: assignNewReplicasIfNeeded(env); + if (TableName.isMetaTableName(getTableName())) { + MetaLocationSyncer syncer = env.getMasterServices().getMetaLocationSyncer(); + if (syncer != null) { + syncer.setMetaReplicaCount(modifiedTableDescriptor.getRegionReplication()); + } + } if (deleteColumnFamilyInModify) { setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java index 38dc11218687..51208e37d4e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java @@ -19,12 +19,11 @@ package org.apache.hadoop.hbase.master.zksyncer; import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; +import java.util.Iterator; import java.util.Map; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; - +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.util.Threads; @@ -34,7 +33,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,22 +40,68 @@ * Tracks the target znode(s) on server ZK cluster and synchronize them to client ZK cluster if * changed *
<p/>
    - * The target znode(s) is given through {@link #getNodesToWatch()} method + * The target znode(s) is given through {@link #getPathsToWatch()} method */ @InterfaceAudience.Private public abstract class ClientZKSyncer extends ZKListener { private static final Logger LOG = LoggerFactory.getLogger(ClientZKSyncer.class); private final Server server; private final ZKWatcher clientZkWatcher; + + /** + * Used to store the newest data which we want to sync to client zk. + *
<p/>
    + * For meta location, since we may reduce the replica number, so here we add a {@code delete} flag + * to tell the updater delete the znode on client zk and quit. + */ + private static final class ZKData { + + byte[] data; + + boolean delete = false; + + synchronized void set(byte[] data) { + this.data = data; + notifyAll(); + } + + synchronized byte[] get() throws InterruptedException { + while (!delete && data == null) { + wait(); + } + byte[] d = data; + data = null; + return d; + } + + synchronized void delete() { + this.delete = true; + notifyAll(); + } + + synchronized boolean isDeleted() { + return delete; + } + } + // We use queues and daemon threads to synchronize the data to client ZK cluster // to avoid blocking the single event thread for watchers - private final Map> queues; + private final ConcurrentMap queues; public ClientZKSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server server) { super(watcher); this.server = server; this.clientZkWatcher = clientZkWatcher; - this.queues = new HashMap<>(); + this.queues = new ConcurrentHashMap<>(); + } + + private void startNewSyncThread(String path) { + ZKData zkData = new ZKData(); + queues.put(path, zkData); + Thread updater = new ClientZkUpdater(path, zkData); + updater.setDaemon(true); + updater.start(); + watchAndCheckExists(path); } /** @@ -69,17 +113,12 @@ public void start() throws KeeperException { this.watcher.registerListener(this); // create base znode on remote ZK ZKUtil.createWithParents(clientZkWatcher, watcher.getZNodePaths().baseZNode); - // set meta znodes for client ZK - Collection nodes = getNodesToWatch(); - LOG.debug("Znodes to watch: " + nodes); + // set znodes for client ZK + Set paths = getPathsToWatch(); + LOG.debug("ZNodes to watch: {}", paths); // initialize queues and threads - for (String node : nodes) { - BlockingQueue queue = new ArrayBlockingQueue<>(1); - queues.put(node, queue); - Thread updater = new ClientZkUpdater(node, queue); - updater.setDaemon(true); - updater.start(); - watchAndCheckExists(node); + for (String path : paths) { + startNewSyncThread(path); } } @@ -112,10 +151,9 @@ private void watchAndCheckExists(String node) { * @param data the data to write to queue */ private void upsertQueue(String node, byte[] data) { - BlockingQueue queue = queues.get(node); - synchronized (queue) { - queue.poll(); - queue.offer(data); + ZKData zkData = queues.get(node); + if (zkData != null) { + zkData.set(data); } } @@ -126,35 +164,49 @@ private void upsertQueue(String node, byte[] data) { * @param data the data to set to client ZK * @throws InterruptedException if the thread is interrupted during process */ - private final void setDataForClientZkUntilSuccess(String node, byte[] data) - throws InterruptedException { + private void setDataForClientZkUntilSuccess(String node, byte[] data) + throws InterruptedException { + boolean create = false; while (!server.isStopped()) { try { LOG.debug("Set data for remote " + node + ", client zk wather: " + clientZkWatcher); - ZKUtil.setData(clientZkWatcher, node, data); - break; - } catch (KeeperException.NoNodeException nne) { - // Node doesn't exist, create it and set value - try { + if (create) { ZKUtil.createNodeIfNotExistsNoWatch(clientZkWatcher, node, data, CreateMode.PERSISTENT); - break; - } catch (KeeperException.ConnectionLossException - | KeeperException.SessionExpiredException ee) { - reconnectAfterExpiration(); - } catch (KeeperException e) { - LOG.warn( - "Failed to create znode " + node + " due to: " + e.getMessage() + ", will retry 
later"); + } else { + ZKUtil.setData(clientZkWatcher, node, data); } - } catch (KeeperException.ConnectionLossException - | KeeperException.SessionExpiredException ee) { - reconnectAfterExpiration(); + break; } catch (KeeperException e) { - LOG.debug("Failed to set data to client ZK, will retry later", e); + LOG.debug("Failed to set data for {} to client ZK, will retry later", node, e); + if (e.code() == KeeperException.Code.SESSIONEXPIRED) { + reconnectAfterExpiration(); + } + if (e.code() == KeeperException.Code.NONODE) { + create = true; + } + if (e.code() == KeeperException.Code.NODEEXISTS) { + create = false; + } } Threads.sleep(HConstants.SOCKET_RETRY_WAIT_MS); } } + private void deleteDataForClientZkUntilSuccess(String node) throws InterruptedException { + while (!server.isStopped()) { + LOG.debug("Delete remote " + node + ", client zk wather: " + clientZkWatcher); + try { + ZKUtil.deleteNode(clientZkWatcher, node); + } catch (KeeperException e) { + LOG.debug("Failed to delete node from client ZK, will retry later", e); + if (e.code() == KeeperException.Code.SESSIONEXPIRED) { + reconnectAfterExpiration(); + } + + } + } + } + private final void reconnectAfterExpiration() throws InterruptedException { LOG.warn("ZK session expired or lost. Retry a new connection..."); try { @@ -164,11 +216,7 @@ private final void reconnectAfterExpiration() throws InterruptedException { } } - @Override - public void nodeCreated(String path) { - if (!validate(path)) { - return; - } + private void getDataAndWatch(String path) { try { byte[] data = ZKUtil.getDataAndWatch(watcher, path); upsertQueue(path, data); @@ -177,23 +225,39 @@ public void nodeCreated(String path) { } } + private void removeQueue(String path) { + ZKData zkData = queues.remove(path); + if (zkData != null) { + zkData.delete(); + } + } + @Override - public void nodeDataChanged(String path) { + public void nodeCreated(String path) { if (validate(path)) { - nodeCreated(path); + getDataAndWatch(path); + } else { + removeQueue(path); } } + @Override + public void nodeDataChanged(String path) { + nodeCreated(path); + } + @Override public synchronized void nodeDeleted(String path) { if (validate(path)) { try { if (ZKUtil.watchAndCheckExists(watcher, path)) { - nodeCreated(path); + getDataAndWatch(path); } } catch (KeeperException e) { LOG.warn("Unexpected exception handling nodeDeleted event for path: " + path, e); } + } else { + removeQueue(path); } } @@ -202,41 +266,67 @@ public synchronized void nodeDeleted(String path) { * @param path the path to validate * @return true if the znode is watched by us */ - abstract boolean validate(String path); + protected abstract boolean validate(String path); /** - * @return the znode(s) to watch + * @return the zk path(s) to watch */ - abstract Collection getNodesToWatch() throws KeeperException; + protected abstract Set getPathsToWatch(); + + protected final void refreshWatchingList() { + Set newPaths = getPathsToWatch(); + LOG.debug("New ZNodes to watch: {}", newPaths); + Iterator> iter = queues.entrySet().iterator(); + // stop unused syncers + while (iter.hasNext()) { + Map.Entry entry = iter.next(); + if (!newPaths.contains(entry.getKey())) { + iter.remove(); + entry.getValue().delete(); + } + } + // start new syncers + for (String newPath : newPaths) { + if (!queues.containsKey(newPath)) { + startNewSyncThread(newPath); + } + } + } /** * Thread to synchronize znode data to client ZK cluster */ - class ClientZkUpdater extends Thread { - final String znode; - final BlockingQueue queue; + private 
final class ClientZkUpdater extends Thread { + private final String znode; + private final ZKData zkData; - public ClientZkUpdater(String znode, BlockingQueue queue) { + public ClientZkUpdater(String znode, ZKData zkData) { this.znode = znode; - this.queue = queue; + this.zkData = zkData; setName("ClientZKUpdater-" + znode); } @Override public void run() { + LOG.debug("Client zk updater for znode {} started", znode); while (!server.isStopped()) { try { - byte[] data = queue.take(); - setDataForClientZkUntilSuccess(znode, data); - } catch (InterruptedException e) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Interrupted while checking whether need to update meta location to client zk"); + byte[] data = zkData.get(); + if (data != null) { + setDataForClientZkUntilSuccess(znode, data); + } else { + if (zkData.isDeleted()) { + deleteDataForClientZkUntilSuccess(znode); + break; + } } + } catch (InterruptedException e) { + LOG.debug("Interrupted while checking whether need to update meta location to client zk"); Thread.currentThread().interrupt(); break; } } + LOG.debug("Client zk updater for znode {} stopped", znode); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java index a9aa13cb93d3..ee04238d0b95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java @@ -18,9 +18,8 @@ */ package org.apache.hadoop.hbase.master.zksyncer; -import java.util.ArrayList; -import java.util.Collection; - +import java.util.Collections; +import java.util.Set; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -39,14 +38,12 @@ public MasterAddressSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server } @Override - boolean validate(String path) { + protected boolean validate(String path) { return path.equals(masterAddressZNode); } @Override - Collection getNodesToWatch() { - ArrayList toReturn = new ArrayList<>(); - toReturn.add(masterAddressZNode); - return toReturn; + protected Set getPathsToWatch() { + return Collections.singleton(masterAddressZNode); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java index dca5cadf8adf..f6e38329ac34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java @@ -18,13 +18,12 @@ */ package org.apache.hadoop.hbase.master.zksyncer; -import java.util.Collection; +import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.IntStream; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; /** * Tracks the meta region locations on server ZK cluster and synchronize them to client ZK cluster @@ -32,19 +31,28 @@ */ @InterfaceAudience.Private public class MetaLocationSyncer extends ClientZKSyncer { + + private volatile int metaReplicaCount = 1; + public MetaLocationSyncer(ZKWatcher watcher, ZKWatcher 
clientZkWatcher, Server server) { super(watcher, clientZkWatcher, server); } @Override - boolean validate(String path) { + protected boolean validate(String path) { return watcher.getZNodePaths().isMetaZNodePath(path); } @Override - Collection getNodesToWatch() throws KeeperException { - return watcher.getMetaReplicaNodes().stream() - .map(znode -> ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode)) - .collect(Collectors.toList()); + protected Set getPathsToWatch() { + return IntStream.range(0, metaReplicaCount) + .mapToObj(watcher.getZNodePaths()::getZNodeForReplica).collect(Collectors.toSet()); + } + + public void setMetaReplicaCount(int replicaCount) { + if (replicaCount != metaReplicaCount) { + metaReplicaCount = replicaCount; + refreshWatchingList(); + } } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 4665e8417de7..7fc955234557 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -26,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartMiniClusterOption; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -35,13 +40,11 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,11 +64,11 @@ public class TestSeparateClientZKCluster { private final byte[] newVal = Bytes.toBytes("v2"); @Rule - public TestName name = new TestName(); + public TableNameTestRule name = new TableNameTestRule(); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSeparateClientZKCluster.class); + HBaseClassTestRule.forClass(TestSeparateClientZKCluster.class); @BeforeClass public static void beforeAllTests() throws Exception { @@ -78,13 +81,15 @@ public static void beforeAllTests() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.client.start.log.errors.counter", -1); TEST_UTIL.getConfiguration().setInt("zookeeper.recovery.retry", 1); // core settings for testing client ZK cluster + TEST_UTIL.getConfiguration().setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + ZKConnectionRegistry.class, ConnectionRegistry.class); TEST_UTIL.getConfiguration().set(HConstants.CLIENT_ZOOKEEPER_QUORUM, HConstants.LOCALHOST); TEST_UTIL.getConfiguration().setInt(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT, clientZkPort); // reduce zk session timeout to easier trigger session 
expiration TEST_UTIL.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, ZK_SESSION_TIMEOUT); // Start a cluster with 2 masters and 3 regionservers. - StartMiniClusterOption option = StartMiniClusterOption.builder() - .numMasters(2).numRegionServers(3).numDataNodes(3).build(); + StartMiniClusterOption option = + StartMiniClusterOption.builder().numMasters(2).numRegionServers(3).numDataNodes(3).build(); TEST_UTIL.startMiniCluster(option); } @@ -97,7 +102,7 @@ public static void afterAllTests() throws Exception { @Test public void testBasicOperation() throws Exception { - TableName tn = TableName.valueOf(name.getMethodName()); + TableName tn = name.getTableName(); // create table Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) { @@ -113,7 +118,7 @@ public void testBasicOperation() throws Exception { Get get = new Get(row); Result result = table.get(get); LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier))); - Assert.assertArrayEquals(value, result.getValue(family, qualifier)); + assertArrayEquals(value, result.getValue(family, qualifier)); } } @@ -133,24 +138,24 @@ public void testMasterSwitch() throws Exception { } LOG.info("Shutdown master {}", master.getServerName()); while (cluster.getMaster() == null || !cluster.getMaster().isInitialized()) { - LOG.info("Get master {}", cluster.getMaster() == null? "null": - cluster.getMaster().getServerName()); + LOG.info("Get master {}", + cluster.getMaster() == null ? "null" : cluster.getMaster().getServerName()); Thread.sleep(200); } LOG.info("Got master {}", cluster.getMaster().getServerName()); // confirm client access still works - Assert.assertTrue(admin.balance(false)); + assertTrue(admin.balance(false)); } } @Test public void testMetaRegionMove() throws Exception { - TableName tn = TableName.valueOf(name.getMethodName()); + TableName tn = name.getTableName(); // create table Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); - Table table = conn.getTable(tn); - RegionLocator locator = conn.getRegionLocator(tn)) { + Table table = conn.getTable(tn); + RegionLocator locator = conn.getRegionLocator(tn)) { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); ColumnFamilyDescriptorBuilder cfDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); @@ -191,13 +196,13 @@ public void testMetaRegionMove() throws Exception { table.put(put); result = table.get(get); LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier))); - Assert.assertArrayEquals(newVal, result.getValue(family, qualifier)); + assertArrayEquals(newVal, result.getValue(family, qualifier)); } } @Test public void testMetaMoveDuringClientZkClusterRestart() throws Exception { - TableName tn = TableName.valueOf(name.getMethodName()); + TableName tn = name.getTableName(); // create table Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) { @@ -233,18 +238,18 @@ public void testMetaMoveDuringClientZkClusterRestart() throws Exception { Get get = new Get(row); Result result = table.get(get); LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier))); - Assert.assertArrayEquals(value, result.getValue(family, qualifier)); + assertArrayEquals(value, result.getValue(family, qualifier)); } } @Test public void testAsyncTable() throws Exception { - TableName tn = TableName.valueOf(name.getMethodName()); + TableName tn = name.getTableName(); ColumnFamilyDescriptorBuilder 
cfDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); TableDescriptorBuilder tableDescBuilder = - TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); + TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); try (AsyncConnection ASYNC_CONN = - ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { ASYNC_CONN.getAdmin().createTable(tableDescBuilder.build()).get(); AsyncTable table = ASYNC_CONN.getTable(tn); // put some data @@ -255,7 +260,22 @@ public void testAsyncTable() throws Exception { Get get = new Get(row); Result result = table.get(get).get(); LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier))); - Assert.assertArrayEquals(value, result.getValue(family, qualifier)); + assertArrayEquals(value, result.getValue(family, qualifier)); + } + } + + @Test + public void testChangeMetaReplicaCount() throws Exception { + Admin admin = TEST_UTIL.getAdmin(); + try (RegionLocator locator = + TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + assertEquals(1, locator.getAllRegionLocations().size()); + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 3); + TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 3); + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 2); + TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 2); + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 1); + TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 1); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 3f3e80960bb9..933addfbf600 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.LockedResource; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -514,4 +515,9 @@ public boolean isBalancerOn() { public boolean normalizeRegions(NormalizeTableFilterParams ntfp, boolean isHighPriority) { return false; } + + @Override + public MetaLocationSyncer getMetaLocationSyncer() { + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index 5979f4845a93..bdeab3d28a25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -160,7 +160,7 @@ public void testMasterInitWithObserverModeClientZKQuorum() throws Exception { while (!master.isInitialized()) { Threads.sleep(200); } - Assert.assertNull(master.metaLocationSyncer); + Assert.assertNull(master.getMetaLocationSyncer()); Assert.assertNull(master.masterAddressSyncer); master.stopMaster(); 
master.join(); From 4bd9ee43a4b12820d4d6346973fe8f37140786df Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 4 Nov 2020 16:05:42 +0530 Subject: [PATCH 168/769] HBASE-25245 : Fixing incorrect maven and jdk names for generate-hbase-website Closes #2624 Signed-off-by: Duo Zhang --- .../jenkins-scripts/generate-hbase-website.Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile b/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile index 76b7d3d4140f..7e8ec44a4e6a 100644 --- a/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile +++ b/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile @@ -37,9 +37,9 @@ pipeline { stages { stage ('generate hbase website') { tools { - maven 'Maven (latest)' + maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in. - jdk "JDK 1.8 (latest)" + jdk "jdk_1.8_latest" } steps { dir('hbase') { From 0e71d6192ae731b3c05726ea49bb3b623d5e5487 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Thu, 5 Nov 2020 00:21:31 +0800 Subject: [PATCH 169/769] HBASE-25053 WAL replay should ignore 0-length files (#2437) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/regionserver/HRegion.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index bca18dbcb013..d0e628432b13 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -5372,6 +5372,11 @@ long replayRecoveredEditsIfAny(Map maxSeqIdInStores, recoveredEditsDir); if (files != null) { for (FileStatus file : files) { + // it is safe to trust the zero-length in this case because we've been through rename and + // lease recovery in the above. + if (isZeroLengthThenDelete(fs, file, file.getPath())) { + continue; + } seqId = Math.max(seqId, replayRecoveredEdits(file.getPath(), maxSeqIdInStores, reporter, fs)); } @@ -6536,6 +6541,8 @@ protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreAccountin } /** + * make sure have been through lease recovery before get file status, so the file length can be + * trusted. * @param p File to check. * @return True if file was zero-length (and if so, we'll delete it in here). 
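   *         (a recovered-edits file of length zero can hold no edits, which is why the
   *         replay loop above now skips and deletes such files)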
* @throws IOException From 0356e8efd135ddb9f47536ef11544799e52de5e0 Mon Sep 17 00:00:00 2001 From: WenFeiYi Date: Thu, 5 Nov 2020 19:55:08 +0530 Subject: [PATCH 170/769] HBASE-25240 gson format of RpcServer.logResponse is abnormal Closes #2623 Signed-off-by: Viraj Jasani --- .../apache/hadoop/hbase/util/GsonUtil.java | 4 ++ .../hadoop/hbase/util/TestGsonUtil.java | 48 +++++++++++++++++++ .../apache/hadoop/hbase/ipc/RpcServer.java | 2 +- 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestGsonUtil.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java index 80be4af72f13..59c2d80f4d18 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java @@ -58,4 +58,8 @@ public LongAdder read(JsonReader in) throws IOException { } }); } + + public static GsonBuilder createGsonWithDisableHtmlEscaping() { + return createGson().disableHtmlEscaping(); + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestGsonUtil.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestGsonUtil.java new file mode 100644 index 000000000000..fbfc0b952dae --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestGsonUtil.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MiscTests.class, SmallTests.class }) +public class TestGsonUtil { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestGsonUtil.class); + + private static final Gson GSON = GsonUtil.createGson().create(); + private static final Gson DHE_GSON = GsonUtil.createGsonWithDisableHtmlEscaping().create(); + + @Test + public void testDisableHtmlEscaping() { + // enable html escaping, turn '=' into '\u003d' + assertEquals("\"\\u003d\\u003d\\u003d\"", GSON.toJson("===")); + + // disable html escaping + assertEquals("\"===\"", DHE_GSON.toJson("===")); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index cace5f0240f4..7bae06f601bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -200,7 +200,7 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String TRACE_LOG_MAX_LENGTH = "hbase.ipc.trace.log.max.length"; protected static final String KEY_WORD_TRUNCATED = " "; - protected static final Gson GSON = GsonUtil.createGson().create(); + protected static final Gson GSON = GsonUtil.createGsonWithDisableHtmlEscaping().create(); protected final int maxRequestSize; protected final int warnResponseTime; From 23e656712b7aa9fe0857b77013d9a6f5a044f5cc Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Thu, 5 Nov 2020 08:36:55 -0800 Subject: [PATCH 171/769] =?UTF-8?q?HBASE-25238=20Upgrading=20HBase=20from?= =?UTF-8?q?=202.2.0=20to=202.3.x=20fails=20because=20of=20=E2=80=9CMessage?= =?UTF-8?q?=20missing=20required=20fields:=20state=E2=80=9D=20(#2625)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make protobuf fields add post-2.0.0 release marked 'required' instead be 'optional' so migrations from 2.0.x to 2.1+ or 2.2+ succeeds. Signed-off-by: Viraj Jasani vjasani@apache.org --- .../apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 4 ++-- .../src/main/protobuf/server/ClusterStatus.proto | 7 +++++-- .../src/main/protobuf/server/master/MasterProcedure.proto | 4 +++- .../hbase/master/assignment/RegionRemoteProcedureBase.java | 5 ++++- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 772183251634..f425984a95f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2797,8 +2797,8 @@ public static ReplicationLoadSink toReplicationLoadSink( ClusterStatusProtos.ReplicationLoadSink rls) { return new ReplicationLoadSink(rls.getAgeOfLastAppliedOp(), rls.getTimeStampsOfLastAppliedOp(), - rls.getTimestampStarted(), - rls.getTotalOpsProcessed()); + rls.hasTimestampStarted()? 
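        // (both fields are optional because they were added after hbase-2.0.0; the -1 below
        // stands in for "not reported by an old server")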
rls.getTimestampStarted(): -1L, + rls.hasTotalOpsProcessed()? rls.getTotalOpsProcessed(): -1L); } public static ReplicationLoadSource toReplicationLoadSource( diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto index 35f3c2d054b5..dc875daf7976 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto @@ -207,8 +207,11 @@ message ClientMetrics { message ReplicationLoadSink { required uint64 ageOfLastAppliedOp = 1; required uint64 timeStampsOfLastAppliedOp = 2; - required uint64 timestampStarted = 3; - required uint64 totalOpsProcessed = 4; + // The below two were added after hbase-2.0.0 went out. They have to be added as 'optional' else + // we break upgrades; old RegionServers reporting in w/ old forms of this message will fail to + // deserialize on the new Master. See HBASE-25234 + optional uint64 timestampStarted = 3; + optional uint64 totalOpsProcessed = 4; } message ReplicationLoadSource { diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 8d8b9af009cd..76b085d43c8e 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -573,7 +573,9 @@ enum RegionRemoteProcedureBaseState { message RegionRemoteProcedureBaseStateData { required RegionInfo region = 1; required ServerName target_server = 2; - required RegionRemoteProcedureBaseState state = 3; + // state is actually 'required' but we can't set it as 'required' here else it breaks old + // Messages; see HBASE-22074. + optional RegionRemoteProcedureBaseState state = 3; optional RegionStateTransition.TransitionCode transition_code = 4; optional int64 seq_id = 5; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index 1c90d81ed06f..805b51caebec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -352,7 +352,10 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws serializer.deserialize(RegionRemoteProcedureBaseStateData.class); region = ProtobufUtil.toRegionInfo(data.getRegion()); targetServer = ProtobufUtil.toServerName(data.getTargetServer()); - state = data.getState(); + // 'state' may not be present if we are reading an 'old' form of this pb Message. 
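+    // (procedures serialized before HBASE-22074 omit the field entirely; leaving
+    // 'state' unset here matches how those older messages behaved)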
+ if (data.hasState()) { + state = data.getState(); + } if (data.hasTransitionCode()) { transitionCode = data.getTransitionCode(); seqId = data.getSeqId(); From 671129df5666d25c4012cde179c0bd1bae4b164f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 7 Nov 2020 20:05:04 +0800 Subject: [PATCH 172/769] HBASE-25252 Move HMaster inner classes out (#2628) Signed-off-by: Viraj Jasani Signed-off-by: Wellington Chevreuil Signed-off-by: Guanghao Zhang --- .../apache/hadoop/hbase/master/HMaster.java | 125 ++---------------- .../master/MasterInitializationMonitor.java | 80 +++++++++++ .../hbase/master/MasterRedirectServlet.java | 81 ++++++++++++ 3 files changed, 174 insertions(+), 112 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 8cb399a476e5..573838f58709 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -50,10 +50,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; import java.util.stream.Collectors; -import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -95,7 +92,6 @@ import org.apache.hadoop.hbase.exceptions.MasterStoppedException; import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; @@ -238,76 +234,23 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** - * HMaster is the "master server" for HBase. An HBase cluster has one active - * master. If many masters are started, all compete. Whichever wins goes on to - * run the cluster. All others park themselves in their constructor until - * master or cluster shutdown or until the active master loses its lease in - * zookeeper. Thereafter, all running master jostle to take over master role. - * - *

<p>The Master can be asked shutdown the cluster. See {@link #shutdown()}. In
- * this case it will tell all regionservers to go down and then wait on them
- * all reporting in that they are down. This master will then shut itself down.
- *
- * <p>You can also shutdown just this master. Call {@link #stopMaster()}.
- *
+ * HMaster is the "master server" for HBase. An HBase cluster has one active master. If many masters
+ * are started, all compete. Whichever wins goes on to run the cluster. All others park themselves
+ * in their constructor until master or cluster shutdown or until the active master loses its lease
+ * in zookeeper. Thereafter, all running master jostle to take over master role.
+ * <p/>
+ * The Master can be asked shutdown the cluster. See {@link #shutdown()}. In this case it will tell
+ * all regionservers to go down and then wait on them all reporting in that they are down. This
+ * master will then shut itself down.
+ * <p/>
    + * You can also shutdown just this master. Call {@link #stopMaster()}. * @see org.apache.zookeeper.Watcher */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @SuppressWarnings("deprecation") public class HMaster extends HRegionServer implements MasterServices { - private static Logger LOG = LoggerFactory.getLogger(HMaster.class); - - /** - * Protection against zombie master. Started once Master accepts active responsibility and - * starts taking over responsibilities. Allows a finite time window before giving up ownership. - */ - private static class InitializationMonitor extends Thread { - /** The amount of time in milliseconds to sleep before checking initialization status. */ - public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout"; - public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES); - - /** - * When timeout expired and initialization has not complete, call {@link System#exit(int)} when - * true, do nothing otherwise. - */ - public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout"; - public static final boolean HALT_DEFAULT = false; - private final HMaster master; - private final long timeout; - private final boolean haltOnTimeout; - - /** Creates a Thread that monitors the {@link #isInitialized()} state. */ - InitializationMonitor(HMaster master) { - super("MasterInitializationMonitor"); - this.master = master; - this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT); - this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT); - this.setDaemon(true); - } - - @Override - public void run() { - try { - while (!master.isStopped() && master.isActiveMaster()) { - Thread.sleep(timeout); - if (master.isInitialized()) { - LOG.debug("Initialization completed within allotted tolerance. Monitor exiting."); - } else { - LOG.error("Master failed to complete initialization after " + timeout + "ms. Please" - + " consider submitting a bug report including a thread dump of this process."); - if (haltOnTimeout) { - LOG.error("Zombie Master exiting. Thread dump to stdout"); - Threads.printThreadInfo(System.out, "Zombie HMaster"); - System.exit(-1); - } - } - } - } catch (InterruptedException ie) { - LOG.trace("InitMonitor thread interrupted. Existing."); - } - } - } + private static final Logger LOG = LoggerFactory.getLogger(HMaster.class); // MASTER is name of the webapp and the attribute name used stuffing this //instance into web context. @@ -464,48 +407,6 @@ public void run() { // Cached clusterId on stand by masters to serve clusterID requests from clients. private final CachedClusterId cachedClusterId; - public static class RedirectServlet extends HttpServlet { - private static final long serialVersionUID = 2894774810058302473L; - private final int regionServerInfoPort; - private final String regionServerHostname; - - /** - * @param infoServer that we're trying to send all requests to - * @param hostname may be null. if given, will be used for redirects instead of host from client. 
- */ - public RedirectServlet(InfoServer infoServer, String hostname) { - regionServerInfoPort = infoServer.getPort(); - regionServerHostname = hostname; - } - - @Override - public void doGet(HttpServletRequest request, - HttpServletResponse response) throws ServletException, IOException { - String redirectHost = regionServerHostname; - if(redirectHost == null) { - redirectHost = request.getServerName(); - if(!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) { - LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" + - MASTER_HOSTNAME_KEY + "' is not set; client will get an HTTP 400 response. If " + - "your HBase deployment relies on client accessible names that the region server process " + - "can't resolve locally, then you should set the previously mentioned configuration variable " + - "to an appropriate hostname."); - // no sending client provided input back to the client, so the goal host is just in the logs. - response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " + - "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " + - "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname."); - return; - } - } - // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the - // host and port we're using, but it's buried way too deep to do that ATM. - String redirectUrl = request.getScheme() + "://" - + redirectHost + ":" + regionServerInfoPort - + request.getRequestURI(); - response.sendRedirect(redirectUrl); - } - } - /** * Initializes the HMaster. The steps are as follows: *

    @@ -678,7 +579,7 @@ private int putUpJettyServer() throws IOException { final String redirectHostname = StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead; - final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname); + final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname); final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); context.addServlet(new ServletHolder(redirect), "/*"); context.setServer(masterJettyServer); @@ -998,7 +899,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc this.activeMaster = true; // Start the Zombie master detector after setting master as active, see HBASE-21535 - Thread zombieDetector = new Thread(new InitializationMonitor(this), + Thread zombieDetector = new Thread(new MasterInitializationMonitor(this), "ActiveMasterInitializationMonitor-" + System.currentTimeMillis()); zombieDetector.setDaemon(true); zombieDetector.start(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java new file mode 100644 index 000000000000..dcfeeab41309 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Protection against zombie master. Started once Master accepts active responsibility and starts + * taking over responsibilities. Allows a finite time window before giving up ownership. + */ +@InterfaceAudience.Private +class MasterInitializationMonitor extends Thread { + + private static final Logger LOG = LoggerFactory.getLogger(MasterInitializationMonitor.class); + + /** The amount of time in milliseconds to sleep before checking initialization status. */ + public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout"; + public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES); + + /** + * When timeout expired and initialization has not complete, call {@link System#exit(int)} when + * true, do nothing otherwise. 
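+   * <p>Illustrative use: setting {@code hbase.master.initializationmonitor.haltontimeout}
+   * to {@code true} makes a master that is still uninitialized after the timeout print a
+   * thread dump and exit.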
+ */ + public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout"; + public static final boolean HALT_DEFAULT = false; + + private final HMaster master; + private final long timeout; + private final boolean haltOnTimeout; + + /** Creates a Thread that monitors the {@link #isInitialized()} state. */ + MasterInitializationMonitor(HMaster master) { + super("MasterInitializationMonitor"); + this.master = master; + this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT); + this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT); + this.setDaemon(true); + } + + @Override + public void run() { + try { + while (!master.isStopped() && master.isActiveMaster()) { + Thread.sleep(timeout); + if (master.isInitialized()) { + LOG.debug("Initialization completed within allotted tolerance. Monitor exiting."); + } else { + LOG.error("Master failed to complete initialization after " + timeout + "ms. Please" + + " consider submitting a bug report including a thread dump of this process."); + if (haltOnTimeout) { + LOG.error("Zombie Master exiting. Thread dump to stdout"); + Threads.printThreadInfo(System.out, "Zombie HMaster"); + System.exit(-1); + } + } + } + } catch (InterruptedException ie) { + LOG.trace("InitMonitor thread interrupted. Existing."); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java new file mode 100644 index 000000000000..bda2934dbfb2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY; + +import java.io.IOException; +import java.net.InetAddress; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.hbase.http.InfoServer; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +class MasterRedirectServlet extends HttpServlet { + + private static final long serialVersionUID = 2894774810058302473L; + + private static final Logger LOG = LoggerFactory.getLogger(MasterRedirectServlet.class); + + private final int regionServerInfoPort; + private final String regionServerHostname; + + /** + * @param infoServer that we're trying to send all requests to + * @param hostname may be null. 
if given, will be used for redirects instead of host from client. + */ + public MasterRedirectServlet(InfoServer infoServer, String hostname) { + regionServerInfoPort = infoServer.getPort(); + regionServerHostname = hostname; + } + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + String redirectHost = regionServerHostname; + if (redirectHost == null) { + redirectHost = request.getServerName(); + if (!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) { + LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" + + MASTER_HOSTNAME_KEY + "' is not set; client will get an HTTP 400 response. If " + + "your HBase deployment relies on client accessible names that the region server " + + "process can't resolve locally, then you should set the previously mentioned " + + "configuration variable to an appropriate hostname."); + // no sending client provided input back to the client, so the goal host is just in the + // logs. + response.sendError(400, + "Request was to a host that I can't resolve for any of the network interfaces on " + + "this node. If this is due to an intermediary such as an HTTP load balancer or " + + "other proxy, your HBase administrator can set '" + MASTER_HOSTNAME_KEY + + "' to point to the correct hostname."); + return; + } + } + // TODO: this scheme should come from looking at the scheme registered in the infoserver's http + // server for the host and port we're using, but it's buried way too deep to do that ATM. + String redirectUrl = request.getScheme() + "://" + redirectHost + ":" + regionServerInfoPort + + request.getRequestURI(); + response.sendRedirect(redirectUrl); + } +} \ No newline at end of file From c36ee4fac00c7276393d0c7576c7165564ed13eb Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 8 Nov 2020 21:47:18 +0800 Subject: [PATCH 173/769] HBASE-25254 Rewrite TestMultiLogThreshold to remove the LogDelegate in RSRpcServices (#2631) Signed-off-by: Guanghao Zhang --- .../hbase/regionserver/RSRpcServices.java | 42 ++---- .../regionserver/TestMultiLogThreshold.java | 121 +++++++++++------- 2 files changed, 83 insertions(+), 80 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index e15e8e9c1753..ec280b8b01c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -107,8 +107,8 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.namequeues.NamedQueuePayload; -import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; import org.apache.hadoop.hbase.net.Address; @@ -140,7 +140,6 @@ import org.apache.hadoop.hbase.security.access.NoopAccessChecker; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DNS; 
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -248,6 +247,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; @@ -1112,34 +1112,9 @@ private void closeAllScanners() { } } - // Exposed for testing - interface LogDelegate { - void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold); - } - - private static LogDelegate DEFAULT_LOG_DELEGATE = new LogDelegate() { - @Override - public void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold) { - if (LOG.isWarnEnabled()) { - LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold - + ") (HBASE-18023)." + " Requested Number of Rows: " + sum + " Client: " - + RpcServer.getRequestUserName().orElse(null) + "/" - + RpcServer.getRemoteAddress().orElse(null) - + " first region in multi=" + firstRegionName); - } - } - }; - - private final LogDelegate ld; - - public RSRpcServices(final HRegionServer rs) throws IOException { - this(rs, DEFAULT_LOG_DELEGATE); - } - // Directly invoked only for testing - RSRpcServices(final HRegionServer rs, final LogDelegate ld) throws IOException { + public RSRpcServices(final HRegionServer rs) throws IOException { final Configuration conf = rs.getConfiguration(); - this.ld = ld; regionServer = rs; rowSizeWarnThreshold = conf.getInt( HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); @@ -2627,12 +2602,15 @@ private void checkBatchSizeAndLogLargeSize(MultiRequest request) throws ServiceE sum += regionAction.getActionCount(); } if (sum > rowSizeWarnThreshold) { - ld.logBatchWarning(firstRegionName, sum, rowSizeWarnThreshold); + LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold + + ") (HBASE-18023)." 
+ " Requested Number of Rows: " + sum + " Client: " + + RpcServer.getRequestUserName().orElse(null) + "/" + + RpcServer.getRemoteAddress().orElse(null) + " first region in multi=" + firstRegionName); if (rejectRowsWithSizeOverThreshold) { throw new ServiceException( - "Rejecting large batch operation for current batch with firstRegionName: " - + firstRegionName + " , Requested Number of Rows: " + sum + " , Size Threshold: " - + rowSizeWarnThreshold); + "Rejecting large batch operation for current batch with firstRegionName: " + + firstRegionName + " , Requested Number of Rows: " + sum + " , Size Threshold: " + + rowSizeWarnThreshold); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java index 614b04b8eb45..26de198a8d42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java @@ -17,6 +17,14 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; + import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -26,16 +34,20 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; -import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.log4j.Appender; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.spi.LoggingEvent; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; @@ -52,21 +64,23 @@ * via "Multi" commands) so classified as MediumTests */ @RunWith(Parameterized.class) -@Category(LargeTests.class) +@Category(MediumTests.class) public class TestMultiLogThreshold { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiLogThreshold.class); - - private static RSRpcServices SERVICES; + HBaseClassTestRule.forClass(TestMultiLogThreshold.class); - private static HBaseTestingUtility TEST_UTIL; - private static Configuration CONF; + private static final TableName NAME = TableName.valueOf("tableName"); private static final byte[] TEST_FAM = Bytes.toBytes("fam"); - private static RSRpcServices.LogDelegate LD; - private static HRegionServer RS; - private static int THRESHOLD; + + private HBaseTestingUtility util; + private Configuration conf; + private int threshold; + private HRegionServer rs; + private RSRpcServices services; + + private Appender appender; @Parameterized.Parameter public static boolean rejectLargeBatchOp; @@ -78,20 +92,22 @@ public static List params() { @Before public void setupTest() throws Exception { - final TableName tableName = 
TableName.valueOf("tableName"); - TEST_UTIL = new HBaseTestingUtility(); - CONF = TEST_UTIL.getConfiguration(); - THRESHOLD = CONF.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, - HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); - CONF.setBoolean("hbase.rpc.rows.size.threshold.reject", rejectLargeBatchOp); - TEST_UTIL.startMiniCluster(); - TEST_UTIL.createTable(tableName, TEST_FAM); - RS = TEST_UTIL.getRSForFirstRegionInTable(tableName); + util = new HBaseTestingUtility(); + conf = util.getConfiguration(); + threshold = + conf.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); + conf.setBoolean("hbase.rpc.rows.size.threshold.reject", rejectLargeBatchOp); + util.startMiniCluster(); + util.createTable(NAME, TEST_FAM); + rs = util.getRSForFirstRegionInTable(NAME); + appender = mock(Appender.class); + LogManager.getLogger(RSRpcServices.class).addAppender(appender); } @After public void tearDown() throws Exception { - TEST_UTIL.shutdownMiniCluster(); + LogManager.getLogger(RSRpcServices.class).removeAppender(appender); + util.shutdownMiniCluster(); } private enum ActionType { @@ -104,18 +120,18 @@ private enum ActionType { * Actions */ private void sendMultiRequest(int rows, ActionType actionType) - throws ServiceException, IOException { + throws ServiceException, IOException { RpcController rpcc = Mockito.mock(HBaseRpcController.class); MultiRequest.Builder builder = MultiRequest.newBuilder(); int numRAs = 1; int numAs = 1; switch (actionType) { - case REGION_ACTIONS: - numRAs = rows; - break; - case ACTIONS: - numAs = rows; - break; + case REGION_ACTIONS: + numRAs = rows; + break; + case ACTIONS: + numAs = rows; + break; } for (int i = 0; i < numRAs; i++) { RegionAction.Builder rab = RegionAction.newBuilder(); @@ -128,38 +144,47 @@ private void sendMultiRequest(int rows, ActionType actionType) } builder.addRegionAction(rab.build()); } - LD = Mockito.mock(RSRpcServices.LogDelegate.class); - SERVICES = new RSRpcServices(RS, LD); - SERVICES.multi(rpcc, builder.build()); + services = new RSRpcServices(rs); + services.multi(rpcc, builder.build()); + } + + private void assertLogBatchWarnings(boolean expected) { + ArgumentCaptor captor = ArgumentCaptor.forClass(LoggingEvent.class); + verify(appender, atLeastOnce()).doAppend(captor.capture()); + boolean actual = false; + for (LoggingEvent event : captor.getAllValues()) { + if (event.getLevel() == Level.WARN && + event.getRenderedMessage().contains("Large batch operation detected")) { + actual = true; + break; + } + } + reset(appender); + assertEquals(expected, actual); } @Test public void testMultiLogThresholdRegionActions() throws ServiceException, IOException { try { - sendMultiRequest(THRESHOLD + 1, ActionType.REGION_ACTIONS); - Assert.assertFalse(rejectLargeBatchOp); + sendMultiRequest(threshold + 1, ActionType.REGION_ACTIONS); + assertFalse(rejectLargeBatchOp); } catch (ServiceException e) { - Assert.assertTrue(rejectLargeBatchOp); + assertTrue(rejectLargeBatchOp); } - Mockito.verify(LD, Mockito.times(1)) - .logBatchWarning(Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()); + assertLogBatchWarnings(true); - sendMultiRequest(THRESHOLD, ActionType.REGION_ACTIONS); - Mockito.verify(LD, Mockito.never()) - .logBatchWarning(Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()); + sendMultiRequest(threshold, ActionType.REGION_ACTIONS); + assertLogBatchWarnings(false); try { - sendMultiRequest(THRESHOLD + 1, ActionType.ACTIONS); - Assert.assertFalse(rejectLargeBatchOp); + sendMultiRequest(threshold + 1, 
ActionType.ACTIONS); + assertFalse(rejectLargeBatchOp); } catch (ServiceException e) { - Assert.assertTrue(rejectLargeBatchOp); + assertTrue(rejectLargeBatchOp); } - Mockito.verify(LD, Mockito.times(1)) - .logBatchWarning(Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()); + assertLogBatchWarnings(true); - sendMultiRequest(THRESHOLD, ActionType.ACTIONS); - Mockito.verify(LD, Mockito.never()) - .logBatchWarning(Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()); + sendMultiRequest(threshold, ActionType.ACTIONS); + assertLogBatchWarnings(false); } - } From 5c7432f4a9a99674fa3c318d00f8cbee0d83441c Mon Sep 17 00:00:00 2001 From: gvprathyusha6 Date: Mon, 9 Nov 2020 12:23:36 +0530 Subject: [PATCH 174/769] HBASE-24667 Rename configs that support atypical DNS set ups to put them in hbase.unsafe Closes #2542 Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/util/DNS.java | 14 ++++++- .../src/main/resources/hbase-default.xml | 6 +-- .../hbase/rest/TestSecureRESTServer.java | 2 +- .../hbase/regionserver/HRegionServer.java | 35 ++++++++++++---- .../TestRegionServerHostname.java | 40 +++++++++++++++---- 5 files changed, 75 insertions(+), 22 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java index 2b4e1cbf02cd..5c23ddcedb5a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java @@ -35,13 +35,22 @@ public final class DNS { // the specification of server hostname is optional. The hostname should be resolvable from // both master and region server @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) - public static final String RS_HOSTNAME_KEY = "hbase.regionserver.hostname"; + public static final String UNSAFE_RS_HOSTNAME_KEY = "hbase.unsafe.regionserver.hostname"; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public static final String MASTER_HOSTNAME_KEY = "hbase.master.hostname"; private static boolean HAS_NEW_DNS_GET_DEFAULT_HOST_API; private static Method GET_DEFAULT_HOST_METHOD; + /** + * @deprecated since 2.4.0 and will be removed in 4.0.0. + * Use {@link DNS#UNSAFE_RS_HOSTNAME_KEY} instead. + * @see HBASE-24667 + */ + @Deprecated + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) + public static final String RS_HOSTNAME_KEY = "hbase.regionserver.hostname"; + static { try { GET_DEFAULT_HOST_METHOD = org.apache.hadoop.net.DNS.class @@ -50,6 +59,7 @@ public final class DNS { } catch (Exception e) { HAS_NEW_DNS_GET_DEFAULT_HOST_API = false; // FindBugs: Causes REC_CATCH_EXCEPTION. Suppressed } + Configuration.addDeprecation(RS_HOSTNAME_KEY, UNSAFE_RS_HOSTNAME_KEY); } public enum ServerType { @@ -106,7 +116,7 @@ public static String getHostname(@NonNull Configuration conf, @NonNull ServerTyp hostname = conf.get(MASTER_HOSTNAME_KEY); break; case REGIONSERVER: - hostname = conf.get(RS_HOSTNAME_KEY); + hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY); break; default: hostname = null; diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 6fb6ce98e33d..def502a62cfc 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1119,19 +1119,19 @@ possible configurations would overwhelm and obscure the important. 
http://docs.oracle.com/javase/1.5.0/docs/api/java/net/Socket.html#getTcpNoDelay() - hbase.regionserver.hostname + hbase.unsafe.regionserver.hostname This config is for experts: don't set its value unless you really know what you are doing. When set to a non-empty value, this represents the (external facing) hostname for the underlying server. See https://issues.apache.org/jira/browse/HBASE-12954 for details. - hbase.regionserver.hostname.disable.master.reversedns + hbase.unsafe.regionserver.hostname.disable.master.reversedns false This config is for experts: don't set its value unless you really know what you are doing. When set to true, regionserver will use the current node hostname for the servername and HMaster will skip reverse DNS lookup and use the hostname sent by regionserver instead. Note that this config and - hbase.regionserver.hostname are mutually exclusive. See https://issues.apache.org/jira/browse/HBASE-18226 + hbase.unsafe.regionserver.hostname are mutually exclusive. See https://issues.apache.org/jira/browse/HBASE-18226 for more details. - 2.1.11 - 1.0.18 + 2.1.31 + 1.0.55 2.12.2 1.60 1.0.1 From 57d9cae48056a4530726b8a84c8640f01147881b Mon Sep 17 00:00:00 2001 From: ramkrish86 Date: Wed, 11 Nov 2020 17:39:39 +0530 Subject: [PATCH 178/769] HBASE-25187 Improve SizeCachedKV variants initialization (#2582) * HBASE-25187 Improve SizeCachedKV variants initialization * HBASE-25187 Improve SizeCachedKV variants initialization * The BBKeyValue also can be optimized * Change for SizeCachedKeyValue * Addressing revew comments * Fixing checkstyle and spot bugs comments * Spot bug fix for hashCode * Minor updates make the rowLen as short and some consturctor formatting * Change two more places where there was a cast --- .../hbase/ByteBufferKeyOnlyKeyValue.java | 14 ++- .../hbase/SizeCachedByteBufferKeyValue.java | 90 +++++++++++++++++++ .../hadoop/hbase/SizeCachedKeyValue.java | 16 +++- .../SizeCachedNoTagsByteBufferKeyValue.java | 80 +++++++++++++++++ .../hbase/SizeCachedNoTagsKeyValue.java | 9 +- .../hbase/io/encoding/RowIndexSeekerV1.java | 22 +++-- .../hbase/io/hfile/HFileReaderImpl.java | 28 +++--- 7 files changed, 234 insertions(+), 25 deletions(-) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java index 31f71f98c500..cc7e8d72c3d7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java @@ -61,10 +61,22 @@ public ByteBufferKeyOnlyKeyValue(ByteBuffer buf, int offset, int length) { * @param length */ public void setKey(ByteBuffer key, int offset, int length) { + setKey(key, offset, length, ByteBufferUtils.toShort(key, offset)); + } + + /** + * A setter that helps to avoid object creation every time and whenever + * there is a need to create new OffheapKeyOnlyKeyValue. 
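+   * Passing a precomputed row length lets callers skip re-reading the two-byte row length
+   * from the buffer, which the three-argument overload above does on every call.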
+ * @param key - the key part of the cell + * @param offset - offset of the cell + * @param length - length of the cell + * @param rowLen - the rowlen part of the cell + */ + public void setKey(ByteBuffer key, int offset, int length, short rowLen) { this.buf = key; this.offset = offset; this.length = length; - this.rowLen = ByteBufferUtils.toShort(this.buf, this.offset); + this.rowLen = rowLen; } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java new file mode 100644 index 000000000000..9f5d9c179dd5 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This Cell is an implementation of {@link ByteBufferExtendedCell} where the data resides in + * off heap/ on heap ByteBuffer + */ +@InterfaceAudience.Private +public class SizeCachedByteBufferKeyValue extends ByteBufferKeyValue { + + public static final int FIXED_OVERHEAD = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT; + private short rowLen; + private int keyLen; + + public SizeCachedByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen, short rowLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; + setSequenceId(seqId); + } + + @Override + public short getRowLength() { + return rowLen; + } + + @Override + public int getKeyLength() { + return this.keyLen; + } + + @Override + public long heapSize() { + return super.heapSize() + FIXED_OVERHEAD; + } + + /** + * Override by just returning the length for saving cost of method dispatching. If not, it will + * call {@link ExtendedCell#getSerializedSize()} firstly, then forward to + * {@link SizeCachedKeyValue#getSerializedSize(boolean)}. 
(See HBASE-21657) + */ + @Override + public int getSerializedSize() { + return this.length; + } + + @Override + public boolean equals(Object other) { + return super.equals(other); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java index 663f3eb77c66..5141cfba08f7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java @@ -39,12 +39,22 @@ public class SizeCachedKeyValue extends KeyValue { private short rowLen; private int keyLen; - public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId) { + public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen) { super(bytes, offset, length); // We will read all these cached values at least once. Initialize now itself so that we can // avoid uninitialized checks with every time call - rowLen = super.getRowLength(); - keyLen = super.getKeyLength(); + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen, + short rowLen) { + super(bytes, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; setSequenceId(seqId); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java new file mode 100644 index 000000000000..0374169d9b79 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This Cell is an implementation of {@link ByteBufferExtendedCell} where the data resides in + * off heap/ on heap ByteBuffer + */ +@InterfaceAudience.Private +public class SizeCachedNoTagsByteBufferKeyValue extends NoTagsByteBufferKeyValue { + + public static final int FIXED_OVERHEAD = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT; + private short rowLen; + private int keyLen; + + public SizeCachedNoTagsByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen) { + super(buf, offset, length); + // We will read all these cached values at least once. 
Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedNoTagsByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen, short rowLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; + setSequenceId(seqId); + } + + @Override + public short getRowLength() { + return rowLen; + } + + @Override + public int getKeyLength() { + return this.keyLen; + } + + @Override + public long heapSize() { + return super.heapSize() + FIXED_OVERHEAD; + } + + @Override + public boolean equals(Object other) { + return super.equals(other); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java index 88b6177fcb18..85bdb52bbfd4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java @@ -32,8 +32,13 @@ @InterfaceAudience.Private public class SizeCachedNoTagsKeyValue extends SizeCachedKeyValue { - public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId) { - super(bytes, offset, length, seqId); + public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen) { + super(bytes, offset, length, seqId, keyLen); + } + + public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen, + short rowLen) { + super(bytes, offset, length, seqId, keyLen, rowLen); } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java index 7ff7555ceb27..efc37e64522c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java @@ -18,15 +18,15 @@ import java.nio.ByteBuffer; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; -import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.NoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.SizeCachedByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedKeyValue; +import org.apache.hadoop.hbase.SizeCachedNoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; import org.apache.hadoop.hbase.io.encoding.AbstractDataBlockEncoder.AbstractEncodedSeeker; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -359,26 +359,30 @@ public Cell toCell() { // TODO : reduce the varieties of KV here. Check if based on a boolean // we can handle the 'no tags' case. if (tagsLength > 0) { + // TODO : getRow len here. 
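+          // (this seeker does not cache the row length yet, so the constructors below let
+          // SizeCachedKeyValue derive it from the key bytes; HFileReaderImpl, changed later
+          // in this patch, caches rowLen and passes it through)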
ret = new SizeCachedKeyValue(currentBuffer.array(), - currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId); + currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId, keyLength); } else { ret = new SizeCachedNoTagsKeyValue(currentBuffer.array(), - currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId); + currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId, keyLength); } } else { currentBuffer.asSubByteBuffer(startOffset, cellBufSize, tmpPair); ByteBuffer buf = tmpPair.getFirst(); if (buf.isDirect()) { - ret = - tagsLength > 0 ? new ByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId) - : new NoTagsByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId); + // TODO : getRow len here. + ret = tagsLength > 0 + ? new SizeCachedByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId, + keyLength) + : new SizeCachedNoTagsByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId, + keyLength); } else { if (tagsLength > 0) { ret = new SizeCachedKeyValue(buf.array(), buf.arrayOffset() - + tmpPair.getSecond(), cellBufSize, seqId); + + tmpPair.getSecond(), cellBufSize, seqId, keyLength); } else { ret = new SizeCachedNoTagsKeyValue(buf.array(), buf.arrayOffset() - + tmpPair.getSecond(), cellBufSize, seqId); + + tmpPair.getSecond(), cellBufSize, seqId, keyLength); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index de0b15feebb8..2060b20de415 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; -import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.NoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.SizeCachedByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedKeyValue; +import org.apache.hadoop.hbase.SizeCachedNoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; @@ -322,6 +322,7 @@ protected static class HFileScannerImpl implements HFileScanner { private long currMemstoreTS; protected final HFile.Reader reader; private int currTagsLen; + private short rowLen; // buffer backed keyonlyKV private ByteBufferKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferKeyOnlyKeyValue(); // A pair for reusing in blockSeek() so that we don't garbage lot of objects @@ -446,6 +447,7 @@ protected void readKeyValueLen() { this.currKeyLen = (int)(ll >> Integer.SIZE); this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); checkKeyValueLen(); + this.rowLen = blockBuffer.getShortAfterPosition(Bytes.SIZEOF_LONG); // Move position past the key and value lengths and then beyond the key and value int p = (Bytes.SIZEOF_LONG + currKeyLen + currValueLen); if (reader.getFileContext().isIncludesTags()) { @@ -554,8 +556,9 @@ protected int blockSeek(Cell key, boolean seekBefore) { + " path=" + 
reader.getPath()); } offsetFromPos += Bytes.SIZEOF_LONG; + this.rowLen = blockBuffer.getShortAfterPosition(offsetFromPos); blockBuffer.asSubByteBuffer(blockBuffer.position() + offsetFromPos, klen, pair); - bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen); + bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen, rowLen); int comp = PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv); offsetFromPos += klen + vlen; @@ -790,23 +793,28 @@ public Cell getCell() { // we can handle the 'no tags' case. if (currTagsLen > 0) { ret = new SizeCachedKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } else { ret = new SizeCachedNoTagsKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } } else { ByteBuffer buf = blockBuffer.asSubByteBuffer(cellBufSize); if (buf.isDirect()) { - ret = currTagsLen > 0 ? new ByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId) - : new NoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId); + ret = currTagsLen > 0 + ? new SizeCachedByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, + currKeyLen, rowLen) + : new SizeCachedNoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, + currKeyLen, rowLen); } else { if (currTagsLen > 0) { ret = new SizeCachedKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId); + cellBufSize, seqId, currKeyLen, rowLen); } else { ret = new SizeCachedNoTagsKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId); + cellBufSize, seqId, currKeyLen, rowLen); } } } @@ -1060,7 +1068,7 @@ public String getValueString() { public int compareKey(CellComparator comparator, Cell key) { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, pair); - this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen); + this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen, rowLen); return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, this.bufBackedKeyOnlyKv); } From 0b6d6fd773cf75dc49151975ed0b4ee82c741f59 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Thu, 12 Nov 2020 17:40:24 +0800 Subject: [PATCH 179/769] HBASE-25276 Need to throw the original exception in HRegion#openHRegion (#2648) Signed-off-by: Guanghao Zhang --- .../org/apache/hadoop/hbase/regionserver/HRegion.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index d0e628432b13..67764b9f635b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -8137,11 +8137,16 @@ protected HRegion openHRegion(final CancelableProgressable reporter) RegionReplicaUtil.isDefaultReplica(getRegionInfo())) { writeRegionOpenMarker(wal, openSeqNum); } - } catch(Throwable t) { + } catch (Throwable t) { // By coprocessor path wrong region will open failed, // MetricsRegionWrapperImpl is already init and not close, // add region close when open failed - this.close(); + try { + this.close(); + } catch (Throwable e) { 
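+        // Log and swallow the close failure so that the original open failure 't'
+        // below is the exception the caller actually sees.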
+ LOG.warn("Open region: {} failed. Try close region but got exception ", this.getRegionInfo(), + e); + } throw t; } return this; From 0611ca49ecf50bd96ca4cbb6be7345fcfdab0567 Mon Sep 17 00:00:00 2001 From: Mate Szalay-Beko Date: Thu, 12 Nov 2020 12:37:43 +0100 Subject: [PATCH 180/769] HBASE-25267 Add SSL keystore type and truststore related configs for HBase RESTServer (#2642) HBASE-25267 Make SSL keystore type configurable in HBase RESTServer In this patch I want to introduce the hbase.rest.ssl.keystore.type parameter, enabling us to customize the keystore type for the REST server. If the parameter is not provided, then we should fall-back to the current behaviour (which assumes keystore type JKS). This is similar to how we already configure the InfoServer objects with the ssl.server.keystore.type parameter to set up HTTPS for the various admin UIs. Signed-off-by: Wellington Chevreuil Signed-off-by: Balazs Meszaros Signed-off-by: Sean Busbey --- .../hbase/http/ssl/KeyStoreTestUtil.java | 79 ++++++- .../apache/hadoop/hbase/rest/Constants.java | 4 + .../apache/hadoop/hbase/rest/RESTServer.java | 19 ++ .../hadoop/hbase/rest/client/Client.java | 86 +++++++- .../hadoop/hbase/rest/TestRESTServerSSL.java | 195 ++++++++++++++++++ 5 files changed, 368 insertions(+), 15 deletions(-) create mode 100644 hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index c8abd9c6cebc..c201c7a52328 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -101,7 +101,12 @@ public static KeyPair generateKeyPair(String algorithm) private static KeyStore createEmptyKeyStore() throws GeneralSecurityException, IOException { - KeyStore ks = KeyStore.getInstance("JKS"); + return createEmptyKeyStore("jks"); + } + + private static KeyStore createEmptyKeyStore(String keyStoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = KeyStore.getInstance(keyStoreType); ks.load(null, null); // initialize return ks; } @@ -117,18 +122,29 @@ private static void saveKeyStore(KeyStore ks, String filename, } } + /** + * Creates a keystore with a single key and saves it to a file. + * This method will use the same password for the keystore and for the key. + * This method will always generate a keystore file in JKS format. + * + * @param filename String file to save + * @param password String store password to set on keystore + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ public static void createKeyStore(String filename, String password, String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); - ks.setKeyEntry(alias, privateKey, password.toCharArray(), - new Certificate[]{cert}); - saveKeyStore(ks, filename, password); + createKeyStore(filename, password, password, alias, privateKey, cert); } /** * Creates a keystore with a single key and saves it to a file. + * This method will always generate a keystore file in JKS format. 
* * @param filename String file to save * @param password String store password to set on keystore @@ -143,17 +159,66 @@ public static void createKeyStore(String filename, String password, String keyPassword, String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); + createKeyStore(filename, password, keyPassword, alias, privateKey, cert, "JKS"); + } + + + /** + * Creates a keystore with a single key and saves it to a file. + * + * @param filename String file to save + * @param password String store password to set on keystore + * @param keyPassword String key password to set on key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key + * @param keystoreType String keystore file type (e.g. "JKS") + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ + public static void createKeyStore(String filename, String password, String keyPassword, + String alias, Key privateKey, Certificate cert, + String keystoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(keystoreType); ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), new Certificate[]{cert}); saveKeyStore(ks, filename, password); } + /** + * Creates a truststore with a single certificate and saves it to a file. + * This method uses the default JKS truststore type. + * + * @param filename String file to save + * @param password String store password to set on truststore + * @param alias String alias to use for the certificate + * @param cert Certificate to add + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ public static void createTrustStore(String filename, String password, String alias, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); + createTrustStore(filename, password, alias, cert, "JKS"); + } + + /** + * Creates a truststore with a single certificate and saves it to a file. + * + * @param filename String file to save + * @param password String store password to set on truststore + * @param alias String alias to use for the certificate + * @param cert Certificate to add + * @param trustStoreType String keystore file type (e.g. 
"JKS") + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ + public static void createTrustStore(String filename, String password, String alias, + Certificate cert, String trustStoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(trustStoreType); ks.setCertificateEntry(alias, cert); saveKeyStore(ks, filename, password); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index 4cf8a93ed5b0..704eac78db5a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -51,6 +51,10 @@ public interface Constants { String REST_SSL_ENABLED = "hbase.rest.ssl.enabled"; String REST_SSL_KEYSTORE_STORE = "hbase.rest.ssl.keystore.store"; String REST_SSL_KEYSTORE_PASSWORD = "hbase.rest.ssl.keystore.password"; + String REST_SSL_KEYSTORE_TYPE = "hbase.rest.ssl.keystore.type"; + String REST_SSL_TRUSTSTORE_STORE = "hbase.rest.ssl.truststore.store"; + String REST_SSL_TRUSTSTORE_PASSWORD = "hbase.rest.ssl.truststore.password"; + String REST_SSL_TRUSTSTORE_TYPE = "hbase.rest.ssl.truststore.type"; String REST_SSL_KEYSTORE_KEYPASSWORD = "hbase.rest.ssl.keystore.keypassword"; String REST_SSL_EXCLUDE_CIPHER_SUITES = "hbase.rest.ssl.exclude.cipher.suites"; String REST_SSL_INCLUDE_CIPHER_SUITES = "hbase.rest.ssl.include.cipher.suites"; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index c6f769ee6054..4e6adfb6d7c3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -27,6 +27,7 @@ import java.util.concurrent.ArrayBlockingQueue; import javax.servlet.DispatcherType; import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -305,14 +306,32 @@ public synchronized void run() throws Exception { SslContextFactory sslCtxFactory = new SslContextFactory(); String keystore = conf.get(REST_SSL_KEYSTORE_STORE); + String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE); String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null); String keyPassword = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password); sslCtxFactory.setKeyStorePath(keystore); + if(StringUtils.isNotBlank(keystoreType)) { + sslCtxFactory.setKeyStoreType(keystoreType); + } sslCtxFactory.setKeyStorePassword(password); sslCtxFactory.setKeyManagerPassword(keyPassword); + String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE); + if(StringUtils.isNotBlank(trustStore)) { + sslCtxFactory.setTrustStorePath(trustStore); + } + String trustStorePassword = + HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); + if(StringUtils.isNotBlank(trustStorePassword)) { + sslCtxFactory.setTrustStorePassword(trustStorePassword); + } + String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE); + if(StringUtils.isNotBlank(trustStoreType)) { + sslCtxFactory.setTrustStoreType(trustStoreType); + } + String[] excludeCiphers = servlet.getConfiguration().getStrings( REST_SSL_EXCLUDE_CIPHER_SUITES, 
ArrayUtils.EMPTY_STRING_ARRAY); if (excludeCiphers.length != 0) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 9e6661bd2aac..47700aa9e4fe 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -21,15 +21,23 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; import java.util.Collections; import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; - +import javax.net.ssl.SSLContext; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; @@ -37,6 +45,7 @@ import org.apache.http.HttpResponse; import org.apache.http.HttpStatus; import org.apache.http.client.HttpClient; +import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpHead; @@ -44,9 +53,10 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.entity.InputStreamEntity; -import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; import org.apache.http.message.BasicHeader; -import org.apache.http.params.CoreConnectionPNames; +import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -81,14 +91,35 @@ public Client() { this(null); } - private void initialize(Cluster cluster, boolean sslEnabled) { + private void initialize(Cluster cluster, boolean sslEnabled, Optional trustStore) { this.cluster = cluster; this.sslEnabled = sslEnabled; extraHeaders = new ConcurrentHashMap<>(); String clspath = System.getProperty("java.class.path"); LOG.debug("classpath " + clspath); - this.httpClient = new DefaultHttpClient(); - this.httpClient.getParams().setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 2000); + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + + RequestConfig requestConfig = RequestConfig.custom(). + setConnectTimeout(2000).build(); + httpClientBuilder.setDefaultRequestConfig(requestConfig); + + // Since HBASE-25267 we don't use the deprecated DefaultHttpClient anymore. + // The new http client would decompress the gzip content automatically. + // In order to keep the original behaviour of this public class, we disable + // automatic content compression. 
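+    // (Callers that request gzip and decompress the body themselves would otherwise
+    // start receiving already-inflated content.)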
+ httpClientBuilder.disableContentCompression(); + + if(sslEnabled && trustStore.isPresent()) { + try { + SSLContext sslcontext = + SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); + httpClientBuilder.setSSLContext(sslcontext); + } catch (NoSuchAlgorithmException | KeyStoreException | KeyManagementException e) { + throw new ClientTrustStoreInitializationException("Error while processing truststore", e); + } + } + + this.httpClient = httpClientBuilder.build(); } /** @@ -96,7 +127,7 @@ private void initialize(Cluster cluster, boolean sslEnabled) { * @param cluster the cluster definition */ public Client(Cluster cluster) { - initialize(cluster, false); + this(cluster, false); } /** @@ -105,7 +136,38 @@ public Client(Cluster cluster) { * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, boolean sslEnabled) { - initialize(cluster, sslEnabled); + initialize(cluster, sslEnabled, Optional.empty()); + } + + /** + * Constructor, allowing to define custom trust store (only for SSL connections) + * + * @param cluster the cluster definition + * @param trustStorePath custom trust store to use for SSL connections + * @param trustStorePassword password to use for custom trust store + * @param trustStoreType type of custom trust store + * + * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded + */ + public Client(Cluster cluster, String trustStorePath, + Optional trustStorePassword, Optional trustStoreType) { + + char[] password = trustStorePassword.map(String::toCharArray).orElse(null); + String type = trustStoreType.orElse(KeyStore.getDefaultType()); + + KeyStore trustStore; + try(FileInputStream inputStream = new FileInputStream(new File(trustStorePath))) { + trustStore = KeyStore.getInstance(type); + trustStore.load(inputStream, password); + } catch (KeyStoreException e) { + throw new ClientTrustStoreInitializationException( + "Invalid trust store type: " + type, e); + } catch (CertificateException | NoSuchAlgorithmException | IOException e) { + throw new ClientTrustStoreInitializationException( + "Trust store load error: " + trustStorePath, e); + } + + initialize(cluster, true, Optional.of(trustStore)); } /** @@ -724,4 +786,12 @@ public Response delete(Cluster cluster, String path, Header extraHdr) throws IOE method.releaseConnection(); } } + + + public static class ClientTrustStoreInitializationException extends RuntimeException { + + public ClientTrustStoreInitializationException(String message, Throwable cause) { + super(message, cause); + } + } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java new file mode 100644 index 000000000000..a1fe2f010fdb --- /dev/null +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.Optional; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RestTests.class, MediumTests.class}) +public class TestRESTServerSSL { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRESTServerSSL.class); + + private static final String KEY_STORE_PASSWORD = "myKSPassword"; + private static final String TRUST_STORE_PASSWORD = "myTSPassword"; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); + private static Client sslClient; + private static File keyDir; + private Configuration conf; + + @BeforeClass + public static void beforeClass() throws Exception { + keyDir = initKeystoreDir(); + KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); + X509Certificate serverCertificate = KeyStoreTestUtil.generateCertificate( + "CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); + + generateTrustStore("jks", serverCertificate); + generateTrustStore("jceks", serverCertificate); + generateTrustStore("pkcs12", serverCertificate); + + generateKeyStore("jks", keyPair, serverCertificate); + generateKeyStore("jceks", keyPair, serverCertificate); + generateKeyStore("pkcs12", keyPair, serverCertificate); + + TEST_UTIL.startMiniCluster(); + } + + @AfterClass + public static void afterClass() throws Exception { + // this will also delete the generated test keystore / teststore files, + // as we were placing them under the dataTestDir used by the minicluster + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void beforeEachTest() { + conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.set(Constants.REST_SSL_ENABLED, "true"); + conf.set(Constants.REST_SSL_KEYSTORE_KEYPASSWORD, KEY_STORE_PASSWORD); + conf.set(Constants.REST_SSL_KEYSTORE_PASSWORD, KEY_STORE_PASSWORD); + conf.set(Constants.REST_SSL_TRUSTSTORE_PASSWORD, TRUST_STORE_PASSWORD); + } + + @After + public void tearDownAfterTest() { + REST_TEST_UTIL.shutdownServletContainer(); + } + + @Test + public void testSslConnection() throws Exception { + startRESTServerWithDefaultKeystoreType(); + + Response response = 
sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test(expected = org.apache.http.client.ClientProtocolException.class) + public void testNonSslClientDenied() throws Exception { + startRESTServerWithDefaultKeystoreType(); + + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + Client nonSslClient = new Client(localCluster, false); + + nonSslClient.get("/version"); + } + + @Test + public void testSslConnectionUsingKeystoreFormatJKS() throws Exception { + startRESTServer("jks"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test + public void testSslConnectionUsingKeystoreFormatJCEKS() throws Exception { + startRESTServer("jceks"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test + public void testSslConnectionUsingKeystoreFormatPKCS12() throws Exception { + startRESTServer("pkcs12"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + + + private static File initKeystoreDir() { + String dataTestDir = TEST_UTIL.getDataTestDir().toString(); + File keystoreDir = new File(dataTestDir, TestRESTServerSSL.class.getSimpleName() + "_keys"); + keystoreDir.mkdirs(); + return keystoreDir; + } + + private static void generateKeyStore(String keyStoreType, KeyPair keyPair, + X509Certificate serverCertificate) throws Exception { + String keyStorePath = getKeystoreFilePath(keyStoreType); + KeyStoreTestUtil.createKeyStore(keyStorePath, KEY_STORE_PASSWORD, KEY_STORE_PASSWORD, + "serverKS", keyPair.getPrivate(), serverCertificate, keyStoreType); + } + + private static void generateTrustStore(String trustStoreType, X509Certificate serverCertificate) + throws Exception { + String trustStorePath = getTruststoreFilePath(trustStoreType); + KeyStoreTestUtil.createTrustStore(trustStorePath, TRUST_STORE_PASSWORD, "serverTS", + serverCertificate, trustStoreType); + } + + private static String getKeystoreFilePath(String keyStoreType) { + return String.format("%s/serverKS.%s", keyDir.getAbsolutePath(), keyStoreType); + } + + private static String getTruststoreFilePath(String trustStoreType) { + return String.format("%s/serverTS.%s", keyDir.getAbsolutePath(), trustStoreType); + } + + private void startRESTServerWithDefaultKeystoreType() throws Exception { + conf.set(Constants.REST_SSL_KEYSTORE_STORE, getKeystoreFilePath("jks")); + conf.set(Constants.REST_SSL_TRUSTSTORE_STORE, getTruststoreFilePath("jks")); + + REST_TEST_UTIL.startServletContainer(conf); + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + sslClient = new Client(localCluster, getTruststoreFilePath("jks"), + Optional.of(TRUST_STORE_PASSWORD), Optional.empty()); + } + + private void startRESTServer(String storeType) throws Exception { + conf.set(Constants.REST_SSL_KEYSTORE_TYPE, storeType); + conf.set(Constants.REST_SSL_KEYSTORE_STORE, getKeystoreFilePath(storeType)); + + conf.set(Constants.REST_SSL_TRUSTSTORE_STORE, getTruststoreFilePath(storeType)); + conf.set(Constants.REST_SSL_TRUSTSTORE_TYPE, storeType); + + REST_TEST_UTIL.startServletContainer(conf); + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + sslClient = new Client(localCluster, getTruststoreFilePath(storeType), + Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); + } + +} From 
aaeeaa582e7d6894015c263299ae12a2f08a4301 Mon Sep 17 00:00:00 2001 From: WenFeiYi Date: Thu, 12 Nov 2020 22:13:24 +0800 Subject: [PATCH 181/769] HBASE-25253 Deprecated master carries regions related methods and configs (#2635) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/ZNodeClearer.java | 4 +++- .../hadoop/hbase/master/LoadBalancer.java | 21 +++++++++++++++++++ .../master/balancer/BaseLoadBalancer.java | 19 +++++++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java index 697706507bf0..1cde2fa24844 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java @@ -146,9 +146,11 @@ public static String parseMasterServerName(String rsZnodePath) { } /** - * * @return true if cluster is configured with master-rs collocation + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated private static boolean tablesOnMaster(Configuration conf) { boolean tablesOnMaster = true; String confValue = conf.get(BaseLoadBalancer.TABLES_ON_MASTER); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 90cb3946f8b2..d908aa5ef514 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -52,12 +52,20 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse * Master can carry regions as of hbase-2.0.0. * By default, it carries no tables. * TODO: Add any | system as flags to indicate what it can do. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated String TABLES_ON_MASTER = "hbase.balancer.tablesOnMaster"; /** * Master carries system tables. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated String SYSTEM_TABLES_ON_MASTER = "hbase.balancer.tablesOnMaster.systemTablesOnly"; // Used to signal to the caller that the region(s) cannot be assigned @@ -159,15 +167,28 @@ Map<ServerName, List<RegionInfo>> retainAssignment(Map<RegionInfo, ServerName> r /** * @return true if Master carries regions + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated static boolean isTablesOnMaster(Configuration conf) { return conf.getBoolean(TABLES_ON_MASTER, false); } + /** + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 + */ + @Deprecated static boolean isSystemTablesOnlyOnMaster(Configuration conf) { return conf.getBoolean(SYSTEM_TABLES_ON_MASTER, false); } + /** + * @deprecated since 2.4.0, will be removed in 3.0.0.
+ * @see HBASE-15549 + */ + @Deprecated static boolean isMasterCanHostUserRegions(Configuration conf) { return isTablesOnMaster(conf) && !isSystemTablesOnlyOnMaster(conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index a47bff26a090..5ecedb35ff08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -1042,7 +1042,14 @@ public String toString() { protected ClusterMetrics clusterStatus = null; protected ServerName masterServerName; protected MasterServices services; + + /** + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 + */ + @Deprecated protected boolean onlySystemTablesOnMaster; + protected boolean maintenanceMode; @Override @@ -1075,7 +1082,11 @@ protected void setSlop(Configuration conf) { /** * Check if a region belongs to some system table. * If so, the primary replica may be expected to be put on the master regionserver. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated public boolean shouldBeOnMaster(RegionInfo region) { return (this.maintenanceMode || this.onlySystemTablesOnMaster) && region.getTable().isSystemTable(); @@ -1083,7 +1094,11 @@ public boolean shouldBeOnMaster(RegionInfo region) { /** * Balance the regions that should be on master regionserver. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated protected List balanceMasterRegions(Map> clusterMap) { if (masterServerName == null || clusterMap == null || clusterMap.size() <= 1) return null; List plans = null; @@ -1132,7 +1147,11 @@ protected List balanceMasterRegions(Map /** * If master is configured to carry system tables only, in here is * where we figure what to assign it. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. 
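+   * Only consulted while the deprecated
+   * {@code hbase.balancer.tablesOnMaster.systemTablesOnly} mode is still enabled.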
+ * @see HBASE-15549 */ + @Deprecated @NonNull protected Map> assignMasterSystemRegions( Collection regions, List servers) { From 035c192eb665469ce0c071db86c78f4a873c123b Mon Sep 17 00:00:00 2001 From: Peter Somogyi Date: Thu, 12 Nov 2020 15:37:12 +0100 Subject: [PATCH 182/769] HBASE-25275 Upgrade asciidoctor (#2647) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- pom.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pom.xml b/pom.xml index cd76195611c2..9255f833d679 100755 --- a/pom.xml +++ b/pom.xml @@ -1430,10 +1430,10 @@ ${project.reporting.outputDirectory}/ book - images - coderay ${project.version} + images + coderay @@ -1641,8 +1641,8 @@ 1.0.0 4.2.0 - 1.5.8 - 1.5.0-rc.2 + 2.1.0 + 1.5.3 3.0.0 1.4 From 107f73804919f9494b25f42a837df1ee0b6cfa22 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 2 Dec 2020 22:23:03 +0800 Subject: [PATCH 240/769] HBASE-25342 Upgrade error prone to 2.4.0 (#2725) Have to disable MutablePublicArray because of a bug in error prone https://github.com/google/error-prone/issues/1645 Signed-off-by: stack --- hbase-build-configuration/pom.xml | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index 4bab5e9c579d..49a1dea8a199 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -108,7 +108,7 @@ -XDcompilePolicy=simple - -Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -Xep:FallThrough:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR + -Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -Xep:FallThrough:OFF -Xep:MutablePublicArray:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR -J-Xbootclasspath/p:${settings.localRepository}/com/google/errorprone/javac/${javac.version}/javac-${javac.version}.jar diff --git a/pom.xml b/pom.xml index 5b95ca4b441d..05fde4f5453d 100755 --- a/pom.xml +++ b/pom.xml @@ -1650,7 +1650,7 @@ --> 8.28 1.6.0 - 2.3.4 + 2.4.0 2.4.2 1.0.0 1.8 From 56eb5c9fc8de796665fe959087cb24b9f5f1afd4 Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Wed, 2 Dec 2020 20:36:37 +0530 Subject: [PATCH 241/769] HBASE-25246 Backup/Restore hbase cell tags Closes #2706 Signed-off-by: Viraj Jasani --- .../hbase/shaded/protobuf/ProtobufUtil.java | 24 ++- .../shaded/protobuf/TestProtobufUtil.java | 44 +++++ .../apache/hadoop/hbase/mapreduce/Import.java | 5 +- .../hbase/mapreduce/TestImportExport.java | 158 ++++++++++++++++++ 4 files changed, 221 insertions(+), 10 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index b9a08676f8ee..cfbdd6486255 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2015,7 +2015,8 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(wrap(((ByteBufferExtendedCell) kv).getValueByteBuffer(), ((ByteBufferExtendedCell) kv).getValuePosition(), kv.getValueLength())); - // TODO : Once tags become first class then we may have to set tags to kvbuilder. 
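+    // Tags are now serialized on both the ByteBuffer-backed and array-backed paths so
+    // that consumers of ProtobufUtil (Export/Import among them) can round-trip cell
+    // tags instead of silently dropping them.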
+ kvbuilder.setTags(wrap(((ByteBufferExtendedCell) kv).getTagsByteBuffer(), + ((ByteBufferExtendedCell) kv).getTagsPosition(), kv.getTagsLength())); } else { kvbuilder.setRow( UnsafeByteOperations.unsafeWrap(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); @@ -2027,6 +2028,8 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(UnsafeByteOperations.unsafeWrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + kvbuilder.setTags(UnsafeByteOperations.unsafeWrap(kv.getTagsArray(), kv.getTagsOffset(), + kv.getTagsLength())); } return kvbuilder.build(); } @@ -2039,14 +2042,17 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - return cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()) - .build(); + ExtendedCellBuilder builder = cellBuilder.clear() + .setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()) + .setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()) + .setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()); + if (cell.hasTags()) { + builder.setTags(cell.getTags().toByteArray()); + } + return builder.build(); } public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 7d6eda817cfa..791beb7ede55 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -18,22 +18,30 @@ package org.apache.hadoop.hbase.shaded.protobuf; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -479,4 +487,40 @@ public void testRegionLockInfo() { + "\"sharedLockCount\":0" + "}]", lockJson); } + + /** + * Test {@link ProtobufUtil#toCell(Cell)} and + * {@link 
ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell)} conversion + * methods when it contains tags. + */ + @Test + public void testCellConversionWithTags() { + String tagStr = "tag-1"; + byte tagType = (byte)10; + Tag tag = new ArrayBackedTag(tagType, tagStr); + + ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + cellBuilder.setRow(Bytes.toBytes("row1")); + cellBuilder.setFamily(Bytes.toBytes("f1")); + cellBuilder.setQualifier(Bytes.toBytes("q1")); + cellBuilder.setValue(Bytes.toBytes("value1")); + cellBuilder.setType(Cell.Type.Delete); + cellBuilder.setTags(Collections.singletonList(tag)); + Cell cell = cellBuilder.build(); + + ClientProtos.Result protoResult = + ProtobufUtil.toResult(Result.create(Collections.singletonList(cell))); + assertNotNull(protoResult); + assertEquals(1, protoResult.getCellCount()); + + CellProtos.Cell protoCell = protoResult.getCell(0); + ExtendedCellBuilder decodedBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + Cell decodedCell = ProtobufUtil.toCell(decodedBuilder, protoCell); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(1, decodedTags.size()); + Tag decodedTag = decodedTags.get(0); + assertEquals(tagType, decodedTag.getType()); + assertEquals(tagStr, Tag.getValueAsString(decodedTag)); + } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 239a12bdc688..30071fdfd809 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -511,6 +512,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { + List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -524,7 +526,8 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength()); // value length + kv.getValueLength(), // value length + tags.size() == 0 ? 
null: tags); } } return kv; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 12060a742a2b..5a95fd8eecb6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -34,10 +34,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -46,10 +49,12 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -58,11 +63,18 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.Import.CellImporter; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -117,6 +129,9 @@ public class TestImportExport { private static final long now = System.currentTimeMillis(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final String TEST_ATTR = "source_op"; + public static final String TEST_TAG = "test_tag"; @BeforeClass public static void beforeClass() throws Throwable { @@ -801,4 +816,147 @@ public boolean isWALVisited() { return isVisited; } } + + /** + * Add cell tags to delete mutations, run export and import tool and + * verify that tags are present in import table also. + * @throws Throwable throws Throwable. 
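+   * <p>Both jobs must run with KeyValueCodecWithTags as the RPC codec; the default
+   * codec strips tags at the RPC layer, so the verification below would see none.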
+ */ + @Test + public void testTagsAddition() throws Throwable { + final TableName exportTable = TableName.valueOf(name.getMethodName()); + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(desc); + + Table exportT = UTIL.getConnection().getTable(exportTable); + + //Add first version of QUAL + Put p = new Put(ROW1); + p.addColumn(FAMILYA, QUAL, now, QUAL); + exportT.put(p); + + //Add Delete family marker + Delete d = new Delete(ROW1, now+3); + // Add test attribute to delete mutation. + d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); + exportT.delete(d); + + // Run export too with KeyValueCodecWithTags as Codec. This will ensure that export tool + // will use KeyValueCodecWithTags. + String[] args = new String[] { + "-D" + ExportUtils.RAW_SCAN + "=true", + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + exportTable.getNameAsString(), + FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); + // Assert tag exists in exportTable + assertTagExists(exportTable); + + // Create an import table with MetadataController. + final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); + TableDescriptor importTableDesc = TableDescriptorBuilder + .newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(importTableDesc); + + // Run import tool. + args = new String[] { + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + importTable.getNameAsString(), + FQ_OUTPUT_DIR + }; + assertTrue(runImport(args)); + // Make sure that tags exists in imported table. + assertTagExists(importTable); + } + + private void assertTagExists(TableName table) throws IOException { + List values = new ArrayList<>(); + for (HRegion region : UTIL.getHBaseCluster().getRegions(table)) { + Scan scan = new Scan(); + // Make sure to set rawScan to true so that we will get Delete Markers. + scan.setRaw(true); + scan.readAllVersions(); + scan.withStartRow(ROW1); + // Need to use RegionScanner instead of table#getScanner since the latter will + // not return tags since it will go through rpc layer and remove tags intentionally. + RegionScanner scanner = region.getScanner(scan); + scanner.next(values); + if (!values.isEmpty()) { + break; + } + } + boolean deleteFound = false; + for (Cell cell: values) { + if (PrivateCellUtil.isDelete(cell.getType().getCode())) { + deleteFound = true; + List tags = PrivateCellUtil.getTags(cell); + Assert.assertEquals(1, tags.size()); + for (Tag tag : tags) { + Assert.assertEquals(TEST_TAG, Tag.getValueAsString(tag)); + } + } + } + Assert.assertTrue(deleteFound); + } + + /* + This co-proc will add a cell tag to delete mutation. 
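+     Only mutations carrying the TEST_ATTR attribute are tagged, which lets the test
+     control exactly which deletes pick up a tag.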
+ */ + public static class MetadataController implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) + throws IOException { + if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { + return; + } + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (!(m instanceof Delete)) { + continue; + } + byte[] sourceOpAttr = m.getAttribute(TEST_ATTR); + if (sourceOpAttr == null) { + continue; + } + Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); + List updatedCells = new ArrayList<>(); + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + Cell cell = cellScanner.current(); + List tags = PrivateCellUtil.getTags(cell); + tags.add(sourceOpTag); + Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + updatedCells.add(updatedCell); + } + m.getFamilyCellMap().clear(); + // Clear and add new Cells to the Mutation. + for (Cell cell : updatedCells) { + Delete d = (Delete) m; + d.add(cell); + } + } + } + } } From 946fa81715e8bd1d705bc07061533454bdfd1ae4 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 2 Dec 2020 09:55:24 -0800 Subject: [PATCH 242/769] HBASE-25349 [Flakey Tests] branch-2 TestRefreshRecoveredReplication.testReplicationRefreshSource:141 Waiting timed out after [60,000] msec (#2731) Start the check for recovered queue presence earlier. Signed-off-by: Nick Dimiduk --- .../ReplicationSourceManager.java | 2 +- .../TestRefreshRecoveredReplication.java | 29 ++++++++++++------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 95157ca98d9b..c1166802b0ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -999,7 +999,7 @@ public void run() { wals.add(wal); } oldsources.add(src); - LOG.trace("Added source for recovered queue: " + src.getQueueId()); + LOG.info("Added source for recovered queue {}", src.getQueueId()); for (String wal : walsSet) { LOG.trace("Enqueueing log from recovered queue for source: " + src.getQueueId()); src.enqueueLog(new Path(oldLogDir, wal)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java index f84f32abdf88..cf4f7106f060 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java @@ -18,7 +18,10 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; +import java.util.Collection; +import java.util.List; import java.util.Optional; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -32,6 +35,7 @@ import org.apache.hadoop.hbase.client.Table; import 
org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.replication.TestReplicationBase; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; @@ -51,6 +55,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; +import static org.junit.Assert.assertEquals; /** * Testcase for HBASE-24871. @@ -75,6 +80,7 @@ public class TestRefreshRecoveredReplication extends TestReplicationBase { @BeforeClass public static void setUpBeforeClass() throws Exception { + // NUM_SLAVES1 is presumed 2 in below. NUM_SLAVES1 = 2; // replicate slowly Configuration conf1 = UTIL1.getConfiguration(); @@ -121,22 +127,25 @@ public void testReplicationRefreshSource() throws Exception { table1.put(new Put(r).addColumn(famName, famName, r)); } - // kill rs holding table region - Optional server = UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads() - .stream() + // Kill rs holding table region. There are only TWO servers. We depend on it. + List rss = UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads(); + assertEquals(2, rss.size()); + Optional server = rss.stream() .filter(rst -> CollectionUtils.isNotEmpty(rst.getRegionServer().getRegions(tablename))) .findAny(); Assert.assertTrue(server.isPresent()); + HRegionServer otherServer = rss.get(0).getRegionServer() == server.get().getRegionServer()? + rss.get(1).getRegionServer(): rss.get(0).getRegionServer(); server.get().getRegionServer().abort("stopping for test"); + // waiting for recovered peer to appear. + Replication replication = (Replication)otherServer.getReplicationSourceService(); + UTIL1.waitFor(60000, () -> !replication.getReplicationManager().getOldSources().isEmpty()); + // Wait on only one server being up. 
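+    // Order matters: the recovered-queue check above has to be armed first, otherwise
+    // the recovered source may already have come and gone before we start watching.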
UTIL1.waitFor(60000, () -> - UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() == NUM_SLAVES1 - 1); + // Have to go back to source here because getLiveRegionServerThreads makes new array each time + UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() == NUM_SLAVES1 - 1); UTIL1.waitTableAvailable(tablename); - - // waiting for recovered peer to start - Replication replication = (Replication) UTIL1.getMiniHBaseCluster() - .getLiveRegionServerThreads().get(0).getRegionServer().getReplicationSourceService(); - UTIL1.waitFor(60000, () -> - !replication.getReplicationManager().getOldSources().isEmpty()); + LOG.info("Available {}", tablename); // disable peer to trigger refreshSources hbaseAdmin.disableReplicationPeer(PEER_ID2); From fa257539ca38607b9705e95e3c6d41f6041bff97 Mon Sep 17 00:00:00 2001 From: Laxman Goswami Date: Thu, 3 Dec 2020 14:31:49 +0530 Subject: [PATCH 243/769] HBASE-25230 Embedded zookeeper server not clean up the old data Closes #2732 Signed-off-by: maoling Signed-off-by: Viraj Jasani --- .../hadoop/hbase/zookeeper/HQuorumPeer.java | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java index 4f2a77ce6775..54c74991235a 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java @@ -44,6 +44,7 @@ import org.apache.zookeeper.server.admin.AdminServer; import org.apache.zookeeper.server.quorum.QuorumPeerConfig; import org.apache.zookeeper.server.quorum.QuorumPeerMain; +import org.apache.zookeeper.server.DatadirCleanupManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -88,6 +89,20 @@ public static void main(String[] args) { private static void runZKServer(QuorumPeerConfig zkConfig) throws IOException, AdminServer.AdminServerException { + + /** + * Start and schedule the purge task + * autopurge.purgeInterval is 0 by default,so in fact the DatadirCleanupManager task will not + * be started to clean the logs by default. Config is recommended only for standalone server. + */ + + DatadirCleanupManager purgeMgr=new DatadirCleanupManager( + zkConfig.getDataDir(), + zkConfig.getDataLogDir(), + zkConfig.getSnapRetainCount(), + zkConfig.getPurgeInterval()); + purgeMgr.start(); + if (zkConfig.isDistributed()) { QuorumPeerMain qp = new QuorumPeerMain(); qp.runFromConfig(zkConfig); From 62d493b2cb44c1263adb53ad490fa27c7f62466d Mon Sep 17 00:00:00 2001 From: tianhang Date: Thu, 3 Dec 2020 21:29:33 +0800 Subject: [PATCH 244/769] HBASE-25355 [Documentation] Fix spelling error (#2735) Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/ops_mgt.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index d27c5d6e488f..e491cbc95b54 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -2697,7 +2697,7 @@ up, while `Source.TimeStampOfLastShippedOp`, `Source.EditsReadFromLogQueue`, `Source.OpsShippedToTarget` or `Source.TimeStampOfNextToReplicate` do not change at all, then replication flow is failing to progress, and there might be problems within clusters communication. 
This could also happen if replication is manually paused -(via hbase shell `disable_peer` command, for example), but date keeps getting ingested +(via hbase shell `disable_peer` command, for example), but data keeps getting ingested in the source cluster tables. == Running Multiple Workloads On a Single Cluster From f879d45fb08b8ee250ad0a268383720a36e6c42e Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 3 Dec 2020 21:16:24 +0530 Subject: [PATCH 245/769] Revert "HBASE-25246 Backup/Restore hbase cell tags" This reverts commit 56eb5c9fc8de796665fe959087cb24b9f5f1afd4. --- .../hbase/shaded/protobuf/ProtobufUtil.java | 24 +-- .../shaded/protobuf/TestProtobufUtil.java | 44 ----- .../apache/hadoop/hbase/mapreduce/Import.java | 5 +- .../hbase/mapreduce/TestImportExport.java | 158 ------------------ 4 files changed, 10 insertions(+), 221 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index cfbdd6486255..b9a08676f8ee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2015,8 +2015,7 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(wrap(((ByteBufferExtendedCell) kv).getValueByteBuffer(), ((ByteBufferExtendedCell) kv).getValuePosition(), kv.getValueLength())); - kvbuilder.setTags(wrap(((ByteBufferExtendedCell) kv).getTagsByteBuffer(), - ((ByteBufferExtendedCell) kv).getTagsPosition(), kv.getTagsLength())); + // TODO : Once tags become first class then we may have to set tags to kvbuilder. } else { kvbuilder.setRow( UnsafeByteOperations.unsafeWrap(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); @@ -2028,8 +2027,6 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(UnsafeByteOperations.unsafeWrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); - kvbuilder.setTags(UnsafeByteOperations.unsafeWrap(kv.getTagsArray(), kv.getTagsOffset(), - kv.getTagsLength())); } return kvbuilder.build(); } @@ -2042,17 +2039,14 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - ExtendedCellBuilder builder = cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()); - if (cell.hasTags()) { - builder.setTags(cell.getTags().toByteArray()); - } - return builder.build(); + return cellBuilder.clear() + .setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()) + .setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()) + .setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()) + .build(); } public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 791beb7ede55..7d6eda817cfa 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -18,30 +18,22 @@ package org.apache.hadoop.hbase.shaded.protobuf; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; -import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -487,40 +479,4 @@ public void testRegionLockInfo() { + "\"sharedLockCount\":0" + "}]", lockJson); } - - /** - * Test {@link ProtobufUtil#toCell(Cell)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell)} conversion - * methods when it contains tags. - */ - @Test - public void testCellConversionWithTags() { - String tagStr = "tag-1"; - byte tagType = (byte)10; - Tag tag = new ArrayBackedTag(tagType, tagStr); - - ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); - cellBuilder.setRow(Bytes.toBytes("row1")); - cellBuilder.setFamily(Bytes.toBytes("f1")); - cellBuilder.setQualifier(Bytes.toBytes("q1")); - cellBuilder.setValue(Bytes.toBytes("value1")); - cellBuilder.setType(Cell.Type.Delete); - cellBuilder.setTags(Collections.singletonList(tag)); - Cell cell = cellBuilder.build(); - - ClientProtos.Result protoResult = - ProtobufUtil.toResult(Result.create(Collections.singletonList(cell))); - assertNotNull(protoResult); - assertEquals(1, protoResult.getCellCount()); - - CellProtos.Cell protoCell = protoResult.getCell(0); - ExtendedCellBuilder decodedBuilder = - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); - Cell decodedCell = ProtobufUtil.toCell(decodedBuilder, protoCell); - List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(1, decodedTags.size()); - Tag decodedTag = decodedTags.get(0); - assertEquals(tagType, decodedTag.getType()); - assertEquals(tagStr, Tag.getValueAsString(decodedTag)); - } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 30071fdfd809..239a12bdc688 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Tag; import 
org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -512,7 +511,6 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { - List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -526,8 +524,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength(), // value length - tags.size() == 0 ? null: tags); + kv.getValueLength()); // value length } } return kv; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 5a95fd8eecb6..12060a742a2b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -34,13 +34,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -49,12 +46,10 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -63,18 +58,11 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.Import.CellImporter; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; -import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -129,9 +117,6 @@ public class TestImportExport { 
private static final long now = System.currentTimeMillis(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); - public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); - public static final String TEST_ATTR = "source_op"; - public static final String TEST_TAG = "test_tag"; @BeforeClass public static void beforeClass() throws Throwable { @@ -816,147 +801,4 @@ public boolean isWALVisited() { return isVisited; } } - - /** - * Add cell tags to delete mutations, run export and import tool and - * verify that tags are present in import table also. - * @throws Throwable throws Throwable. - */ - @Test - public void testTagsAddition() throws Throwable { - final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(exportTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); - UTIL.getAdmin().createTable(desc); - - Table exportT = UTIL.getConnection().getTable(exportTable); - - //Add first version of QUAL - Put p = new Put(ROW1); - p.addColumn(FAMILYA, QUAL, now, QUAL); - exportT.put(p); - - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); - // Add test attribute to delete mutation. - d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); - exportT.delete(d); - - // Run export too with KeyValueCodecWithTags as Codec. This will ensure that export tool - // will use KeyValueCodecWithTags. - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - // This will make sure that codec will encode and decode tags in rpc call. - "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export - }; - assertTrue(runExport(args)); - // Assert tag exists in exportTable - assertTagExists(exportTable); - - // Create an import table with MetadataController. - final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); - TableDescriptor importTableDesc = TableDescriptorBuilder - .newBuilder(importTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); - UTIL.getAdmin().createTable(importTableDesc); - - // Run import tool. - args = new String[] { - // This will make sure that codec will encode and decode tags in rpc call. - "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - importTable.getNameAsString(), - FQ_OUTPUT_DIR - }; - assertTrue(runImport(args)); - // Make sure that tags exists in imported table. - assertTagExists(importTable); - } - - private void assertTagExists(TableName table) throws IOException { - List values = new ArrayList<>(); - for (HRegion region : UTIL.getHBaseCluster().getRegions(table)) { - Scan scan = new Scan(); - // Make sure to set rawScan to true so that we will get Delete Markers. - scan.setRaw(true); - scan.readAllVersions(); - scan.withStartRow(ROW1); - // Need to use RegionScanner instead of table#getScanner since the latter will - // not return tags since it will go through rpc layer and remove tags intentionally. 
- RegionScanner scanner = region.getScanner(scan); - scanner.next(values); - if (!values.isEmpty()) { - break; - } - } - boolean deleteFound = false; - for (Cell cell: values) { - if (PrivateCellUtil.isDelete(cell.getType().getCode())) { - deleteFound = true; - List tags = PrivateCellUtil.getTags(cell); - Assert.assertEquals(1, tags.size()); - for (Tag tag : tags) { - Assert.assertEquals(TEST_TAG, Tag.getValueAsString(tag)); - } - } - } - Assert.assertTrue(deleteFound); - } - - /* - This co-proc will add a cell tag to delete mutation. - */ - public static class MetadataController implements RegionCoprocessor, RegionObserver { - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - @Override - public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) - throws IOException { - if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { - return; - } - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - if (!(m instanceof Delete)) { - continue; - } - byte[] sourceOpAttr = m.getAttribute(TEST_ATTR); - if (sourceOpAttr == null) { - continue; - } - Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); - List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { - Cell cell = cellScanner.current(); - List tags = PrivateCellUtil.getTags(cell); - tags.add(sourceOpTag); - Cell updatedCell = PrivateCellUtil.createCell(cell, tags); - updatedCells.add(updatedCell); - } - m.getFamilyCellMap().clear(); - // Clear and add new Cells to the Mutation. - for (Cell cell : updatedCells) { - Delete d = (Delete) m; - d.add(cell); - } - } - } - } } From 8634428724e12263869746fdea4214d21f7b8ce8 Mon Sep 17 00:00:00 2001 From: SteNicholas Date: Fri, 4 Dec 2020 10:56:53 +0800 Subject: [PATCH 246/769] HBASE-24966 The methods in AsyncTableRegionLocator should not throw IOException directly (#2495) Signed-off-by: Duo Zhang --- .../hadoop/hbase/client/AsyncTableRegionLocator.java | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java index 321f44e87b51..96e3ec4173a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import java.io.IOException; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; @@ -89,7 +88,6 @@ default CompletableFuture getRegionLocation(byte[] row, int rep * Find all the replicas for the region on which the given row is being served. * @param row Row to find. * @return Locations for all the replicas of the row. - * @throws IOException if a remote or network exception occurs */ default CompletableFuture> getRegionLocations(byte[] row) { return getRegionLocations(row, false); @@ -100,7 +98,6 @@ default CompletableFuture> getRegionLocations(byte[] row) * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Locations for all the replicas of the row. 
- * @throws IOException if a remote or network exception occurs */ CompletableFuture> getRegionLocations(byte[] row, boolean reload); @@ -120,9 +117,8 @@ default CompletableFuture> getRegionLocations(byte[] row) *
<p/>
    * This is mainly useful for the MapReduce integration. * @return Array of region starting row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture> getStartKeys() throws IOException { + default CompletableFuture> getStartKeys() { return getStartEndKeys().thenApply( startEndKeys -> startEndKeys.stream().map(Pair::getFirst).collect(Collectors.toList())); } @@ -132,9 +128,8 @@ default CompletableFuture> getStartKeys() throws IOException { *
<p/>
    * This is mainly useful for the MapReduce integration. * @return Array of region ending row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture> getEndKeys() throws IOException { + default CompletableFuture> getEndKeys() { return getStartEndKeys().thenApply( startEndKeys -> startEndKeys.stream().map(Pair::getSecond).collect(Collectors.toList())); } @@ -144,9 +139,8 @@ default CompletableFuture> getEndKeys() throws IOException { *
<p/>
    * This is mainly useful for the MapReduce integration. * @return Pair of arrays of region starting and ending row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture>> getStartEndKeys() throws IOException { + default CompletableFuture>> getStartEndKeys() { return getAllRegionLocations().thenApply( locs -> locs.stream().filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion())) .map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())) From b26395f3bb9c9ddc809f434ac653b157cee0db9b Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 4 Dec 2020 08:49:56 -0800 Subject: [PATCH 247/769] HBASE-25354 Update create-release scripts; rotted README Add note on ssh-agent. dev-support/create-release/do-release.sh move gpg check to non-docker context. Also cleanup tmp files when done. dev-support/create-release/hbase-rm/Dockerfile dev-support/create-release/mac-sshd-gpg-agent/Dockerfile Hack to update packages... the old ones no longer available. dev-support/create-release/release-util.sh Allow that there are no JIRA changes in a release. Good for testing. --- dev-support/create-release/README.txt | 18 ++++++---- .../create-release/do-release-docker.sh | 2 +- dev-support/create-release/do-release.sh | 25 ++++++++----- .../create-release/hbase-rm/Dockerfile | 2 +- .../mac-sshd-gpg-agent/Dockerfile | 2 +- dev-support/create-release/release-util.sh | 36 ++++++++++++++----- 6 files changed, 57 insertions(+), 28 deletions(-) diff --git a/dev-support/create-release/README.txt b/dev-support/create-release/README.txt index 4a457ddc09ec..e696574507f9 100644 --- a/dev-support/create-release/README.txt +++ b/dev-support/create-release/README.txt @@ -9,19 +9,21 @@ To run a build w/o invoking docker (not recommended!), use _do_release.sh_. Both scripts will query interactively for needed parameters and passphrases. For explanation of the parameters, execute: + $ release-build.sh --help -Before starting the RC build, run a reconciliation of what is in -JIRA with what is in the commit log. Make sure they align and that -anomalies are explained up in JIRA. +Before starting the RC build, run a reconciliation of what is in JIRA with +what is in the commit log. Make sure they align and that anomalies are +explained up in JIRA. See http://hbase.apache.org/book.html#maven.release -Regardless of where your release build will run (locally, locally in docker, on a remote machine, -etc) you will need a local gpg-agent with access to your secret keys. A quick way to tell gpg -to clear out state and start a gpg-agent is via the following command phrase: +Regardless of where your release build will run (locally, locally in docker, +on a remote machine, etc) you will need a local gpg-agent with access to your +secret keys. A quick way to tell gpg to clear out state and start a gpg-agent +is via the following command phrase: -$ gpgconf --kill all && gpg-connect-agent /bye + $ gpgconf --kill all && gpg-connect-agent /bye Before starting an RC build, make sure your local gpg-agent has configs to properly handle your credentials, especially if you want to avoid @@ -33,6 +35,8 @@ on caching the unlocked secret via ~/.gnupg/gpg-agent.conf default-cache-ttl 86400 max-cache-ttl 86400 +Similarly, run ssh-agent with your ssh key added if building with docker. + Running a build on GCE is easy enough. Here are some notes if of use. Create an instance. 4CPU/15G/10G disk seems to work well enough. 
Once up, run the below to make your machine fit for RC building: diff --git a/dev-support/create-release/do-release-docker.sh b/dev-support/create-release/do-release-docker.sh index e863cb373a0c..b48581f9165b 100755 --- a/dev-support/create-release/do-release-docker.sh +++ b/dev-support/create-release/do-release-docker.sh @@ -302,7 +302,7 @@ if [ "${HOST_OS}" == "DARWIN" ]; then > "${WORKDIR}/gpg-agent-proxy.known_hosts" if [ -s "${WORKDIR}/gpg-agent-proxy.known_hosts" ]; then echo "Your ssh known_hosts does not include the entries for the gpg-agent proxy container." - echo "The following entry(ies) arre missing:" + echo "The following entry(ies) are missing:" sed -e 's/^/ /' "${WORKDIR}/gpg-agent-proxy.known_hosts" read -r -p "Okay to add these entries to ${HOME}/.ssh/known_hosts? [y/n] " ANSWER if [ "$ANSWER" != "y" ]; then diff --git a/dev-support/create-release/do-release.sh b/dev-support/create-release/do-release.sh index 9500801c247b..5566b36c21e2 100755 --- a/dev-support/create-release/do-release.sh +++ b/dev-support/create-release/do-release.sh @@ -17,6 +17,10 @@ # limitations under the License. # +# Make a tmp dir into which we put files cleaned-up on exit. +TMPDIR=$(mktemp -d) +trap "rm -rf $TMPDIR" EXIT + set -e # Use the adjacent do-release-docker.sh instead, if you can. # Otherwise, this runs core of the release creation. @@ -84,19 +88,22 @@ if [ "$RUNNING_IN_DOCKER" = "1" ]; then else # Outside docker, need to ask for information about the release. get_release_info + + # Run this stuff when not in docker to check gpg. + gpg_test_file="${TMPDIR}/gpg_test.$$.txt" + echo "Testing gpg signing ${GPG} ${GPG_ARGS[@]} --detach --armor --sign ${gpg_test_file}" + echo "foo" > "${gpg_test_file}" + if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign "${gpg_test_file}" ; then + gpg_agent_help + fi + # In --batch mode we have to be explicit about what we are verifying + if ! "${GPG}" "${GPG_ARGS[@]}" --verify "${gpg_test_file}.asc" "${gpg_test_file}" ; then + gpg_agent_help + fi fi GPG_TTY="$(tty)" export GPG_TTY -echo "Testing gpg signing." -echo "foo" > gpg_test.txt -if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign gpg_test.txt ; then - gpg_agent_help -fi -# In --batch mode we have to be explicit about what we are verifying -if ! "${GPG}" "${GPG_ARGS[@]}" --verify gpg_test.txt.asc gpg_test.txt ; then - gpg_agent_help -fi if [[ -z "$RELEASE_STEP" ]]; then # If doing all stages, leave out 'publish-snapshot' diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile index 2c29974cfbba..26cb7e51abb3 100644 --- a/dev-support/create-release/hbase-rm/Dockerfile +++ b/dev-support/create-release/hbase-rm/Dockerfile @@ -34,7 +34,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ libxml2-dev='2.9.4+dfsg1-*' \ lsof='4.89+dfsg-*' \ maven='3.6.0-*' \ - openjdk-8-jdk='8u252-b09-*' \ + openjdk-8-jdk='8u*' \ python-pip='9.0.1-*' \ subversion='1.9.7-*' \ wget='1.19.4-*' \ diff --git a/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile b/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile index a71d867613b1..3d206dc83365 100644 --- a/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile +++ b/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile @@ -83,7 +83,7 @@ FROM ubuntu:18.04 # into the container rather than launching a new docker container. 
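# Note: the pinned package versions such as the gnupg2 one below track the Ubuntu 18.04
# archive and need bumping whenever the archive rotates the old builds out.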
RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && DEBIAN_FRONTEND=noninteractive apt-get -qq -y install --no-install-recommends \ - openssh-server=1:7.6p1-4ubuntu0.3 gnupg2=2.2.4-1ubuntu1.2 && mkdir /run/sshd \ + openssh-server=1:7.6p1-4ubuntu0.3 gnupg2=2.2.4-1ubuntu1.3 && mkdir /run/sshd \ && echo "StreamLocalBindUnlink yes" >> /etc/ssh/sshd_config \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 64654bba4b86..4dffd672c2ae 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -501,10 +501,17 @@ function update_releasenotes { local jira_project local timing_token timing_token="$(start_step)" + changelog="CHANGELOG.${jira_fix_version}.md" + releasenotes="RELEASENOTES.${jira_fix_version}.md" + if [ -f ${changelog} ]; then + rm ${changelog} + fi + if [ -f ${releasenotes} ]; then + rm ${releasenotes} + fi jira_project="$(get_jira_name "$(basename "$project_dir")")" "${YETUS_HOME}/bin/releasedocmaker" -p "${jira_project}" --fileversions -v "${jira_fix_version}" \ - -l --sortorder=newer --skip-credits - pwd + -l --sortorder=newer --skip-credits || true # First clear out the changes written by previous RCs. if [ -f "${project_dir}/CHANGES.md" ]; then sed -i -e \ @@ -517,24 +524,35 @@ function update_releasenotes { "${project_dir}/RELEASENOTES.md" || true fi + # Yetus will not generate CHANGES if no JIRAs fixed against the release version + # (Could happen if a release were bungled such that we had to make a new one + # without changes) + if [ ! -f "${changelog}" ]; then + echo -e "## Release ${jira_fix_version} - Unreleased (as of `date`)\nNo changes\n" > "${changelog}" + fi + if [ ! -f "${releasenotes}" ]; then + echo -e "# hbase ${jira_fix_version} Release Notes\nNo changes\n" > "${releasenotes}" + fi + # The releasedocmaker call above generates RELEASENOTES.X.X.X.md and CHANGELOG.X.X.X.md. if [ -f "${project_dir}/CHANGES.md" ]; then # To insert into project's CHANGES.md...need to cut the top off the # CHANGELOG.X.X.X.md file removing license and first line and then # insert it after the license comment closing where we have a # DO NOT REMOVE marker text! - sed -i -e '/## Release/,$!d' "CHANGELOG.${jira_fix_version}.md" - sed -i -e "/DO NOT REMOVE/r CHANGELOG.${jira_fix_version}.md" "${project_dir}/CHANGES.md" + sed -i -e '/## Release/,$!d' "${changelog}" + sed -i -e '2,${/^# HBASE Changelog/d;}' "${project_dir}/CHANGES.md" + sed -i -e "/DO NOT REMOVE/r ${changelog}" "${project_dir}/CHANGES.md" else - mv "CHANGELOG.${jira_fix_version}.md" "${project_dir}/CHANGES.md" + mv "${changelog}" "${project_dir}/CHANGES.md" fi if [ -f "${project_dir}/RELEASENOTES.md" ]; then # Similar for RELEASENOTES but slightly different. 
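+  # In plain terms, the seds below trim everything above the 'Release Notes' heading in the
+  # generated file, drop any repeat of the top-level heading after line 1, and then splice
+  # the file in after the DO NOT REMOVE marker.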
- sed -i -e '/Release Notes/,$!d' "RELEASENOTES.${jira_fix_version}.md" - sed -i -e "/DO NOT REMOVE/r RELEASENOTES.${jira_fix_version}.md" \ - "${project_dir}/RELEASENOTES.md" + sed -i -e '/Release Notes/,$!d' "${releasenotes}" + sed -i -e '2,${/^# RELEASENOTES/d;}' "${project_dir}/RELEASENOTES.md" + sed -i -e "/DO NOT REMOVE/r ${releasenotes}" "${project_dir}/RELEASENOTES.md" else - mv "RELEASENOTES.${jira_fix_version}.md" "${project_dir}/RELEASENOTES.md" + mv "${releasenotes}" "${project_dir}/RELEASENOTES.md" fi stop_step "${timing_token}" } From 6791b29771892dff7ae8d16de2c99502162f2e69 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Sat, 5 Dec 2020 08:55:59 -0800 Subject: [PATCH 248/769] HBASE-25361 [Flakey Tests] branch-2 TestMetaRegionLocationCache.testStandByMetaLocations (#2736) Add a bit of a wait before testing if online replicas match the zk count. It might take a while for all replicas to come online. Signed-off-by: huaxiangsun --- .../hadoop/hbase/client/TestMetaRegionLocationCache.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index d42c1240f9ef..577e15cedfb9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -98,6 +98,14 @@ private void verifyCachedMetaLocations(HMaster master) throws Exception { assertFalse(metaHRLs.isEmpty()); ZKWatcher zk = master.getZooKeeper(); List metaZnodes = zk.getMetaReplicaNodes(); + // Wait till all replicas available. + while (master.getMetaRegionLocationCache().getMetaRegionLocations().get().size() != + metaZnodes.size()) { + Thread.sleep(1000); + if (++retries == 10) { + break; + } + } assertEquals(metaZnodes.size(), metaHRLs.size()); List actualHRLs = getCurrentMetaLocations(zk); Collections.sort(metaHRLs); From 7d0a687e5798a2f4ca3190b409169f7e17a75b34 Mon Sep 17 00:00:00 2001 From: stack Date: Sat, 5 Dec 2020 14:00:18 -0800 Subject: [PATCH 249/769] HBASE-25361 [Flakey Tests] branch-2 TestMetaRegionLocationCache.testStandByMetaLocations (#2736) Addendum; Reset counter so we actually wait in the new loop added by the above. --- .../apache/hadoop/hbase/client/TestMetaRegionLocationCache.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index 577e15cedfb9..2bcddc9ea7f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -99,6 +99,7 @@ private void verifyCachedMetaLocations(HMaster master) throws Exception { ZKWatcher zk = master.getZooKeeper(); List metaZnodes = zk.getMetaReplicaNodes(); // Wait till all replicas available. 
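+    // Note: 'retries' is shared state consumed earlier in this method, so it must be zeroed
+    // here or the bounded wait below may not wait at all.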
+ retries = 0; while (master.getMetaRegionLocationCache().getMetaRegionLocations().get().size() != metaZnodes.size()) { Thread.sleep(1000); @@ -119,6 +120,7 @@ private void verifyCachedMetaLocations(HMaster master) throws Exception { @Test public void testStandByMetaLocations() throws Exception { HMaster standBy = TEST_UTIL.getMiniHBaseCluster().startMaster().getMaster(); + standBy.isInitialized(); verifyCachedMetaLocations(standBy); } From f8134795109bc380b53ec814561e1abdb56b2b58 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 7 Dec 2020 21:49:04 +0800 Subject: [PATCH 250/769] HBASE-25336 Use Address instead of InetSocketAddress in RpcClient implementation (#2716) Signed-off-by: Guanghao Zhang --- .../hadoop/hbase/ipc/AbstractRpcClient.java | 60 ++++--------------- .../hbase/ipc/BlockingRpcConnection.java | 25 ++------ .../hadoop/hbase/ipc/NettyRpcConnection.java | 27 ++------- .../hadoop/hbase/ipc/RpcConnection.java | 24 +++++++- 4 files changed, 39 insertions(+), 97 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 7a7b848304ff..e9ec6a92ee93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -22,9 +22,7 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.wrapException; import java.io.IOException; -import java.net.InetSocketAddress; import java.net.SocketAddress; -import java.net.UnknownHostException; import java.util.Collection; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -320,7 +318,7 @@ private int nextCallId() { * @return A pair with the Message response and the Cell data (if any). */ private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcController hrc, - Message param, Message returnType, final User ticket, final InetSocketAddress isa) + Message param, Message returnType, final User ticket, final Address isa) throws ServiceException { BlockingRpcCallback done = new BlockingRpcCallback<>(); callMethod(md, hrc, param, returnType, ticket, isa, done); @@ -392,7 +390,7 @@ private void onCallFinished(Call call, HBaseRpcController hrc, Address addr, Call callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcController hrc, final Message param, Message returnType, final User ticket, - final InetSocketAddress inetAddr, final RpcCallback callback) { + final Address addr, final RpcCallback callback) { final MetricsConnection.CallStats cs = MetricsConnection.newCallStats(); cs.setStartTime(EnvironmentEdgeManager.currentTime()); @@ -406,7 +404,6 @@ Call callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcController cs.setNumActionsPerServer(numActions); } - final Address addr = Address.fromSocketAddress(inetAddr); final AtomicInteger counter = concurrentCounterCache.getUnchecked(addr); Call call = new Call(nextCallId(), md, param, hrc.cellScanner(), returnType, hrc.getCallTimeout(), hrc.getPriority(), new RpcCallback() { @@ -520,13 +517,6 @@ private static class AbstractRpcChannel { protected final Address addr; - // We cache the resolved InetSocketAddress for the channel so we do not do a DNS lookup - // per method call on the channel. If the remote target is removed or reprovisioned and - // its identity changes a new channel with a newly resolved InetSocketAddress will be - // created as part of retry, so caching here is fine. 
- // Normally, caching an InetSocketAddress is an anti-pattern. - protected InetSocketAddress isa; - protected final AbstractRpcClient rpcClient; protected final User ticket; @@ -576,23 +566,9 @@ protected BlockingRpcChannelImplementation(AbstractRpcClient rpcClient, @Override public Message callBlockingMethod(Descriptors.MethodDescriptor md, RpcController controller, - Message param, Message returnType) throws ServiceException { - // Look up remote address upon first call - if (isa == null) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookups(); - } - isa = Address.toSocketAddress(addr); - if (isa.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookupsFailed(); - } - isa = null; - throw new ServiceException(new UnknownHostException(addr + " could not be resolved")); - } - } - return rpcClient.callBlockingMethod(md, configureRpcController(controller), - param, returnType, ticket, isa); + Message param, Message returnType) throws ServiceException { + return rpcClient.callBlockingMethod(md, configureRpcController(controller), param, returnType, + ticket, addr); } } @@ -608,29 +584,13 @@ protected RpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, } @Override - public void callMethod(Descriptors.MethodDescriptor md, RpcController controller, - Message param, Message returnType, RpcCallback done) { - HBaseRpcController configuredController = - configureRpcController(Preconditions.checkNotNull(controller, - "RpcController can not be null for async rpc call")); - // Look up remote address upon first call - if (isa == null || isa.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookups(); - } - isa = Address.toSocketAddress(addr); - if (isa.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookupsFailed(); - } - isa = null; - controller.setFailed(addr + " could not be resolved"); - return; - } - } + public void callMethod(Descriptors.MethodDescriptor md, RpcController controller, Message param, + Message returnType, RpcCallback done) { + HBaseRpcController configuredController = configureRpcController( + Preconditions.checkNotNull(controller, "RpcController can not be null for async rpc call")); // This method does not throw any exceptions, so the caller must provide a // HBaseRpcController which is used to pass the exceptions. 
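+    // Passing the unresolved Address through (rather than a pre-resolved InetSocketAddress)
+    // lets RpcConnection.getRemoteInetAddress perform, and meter, the name lookup in one place.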
- this.rpcClient.callMethod(md, configuredController, param, returnType, ticket, isa, done); + this.rpcClient.callMethod(md, configuredController, param, returnType, ticket, addr, done); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index ce2bd11f960a..cd8035fd58ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -35,7 +35,6 @@ import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketTimeoutException; -import java.net.UnknownHostException; import java.security.PrivilegedExceptionAction; import java.util.ArrayDeque; import java.util.Locale; @@ -44,7 +43,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ThreadLocalRandom; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -52,7 +50,6 @@ import org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback; import org.apache.hadoop.hbase.log.HBaseMarkers; -import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.HBaseSaslRpcClient; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection; @@ -69,11 +66,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Message.Builder; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; @@ -256,16 +255,7 @@ protected void setupConnection() throws IOException { if (this.rpcClient.localAddr != null) { this.socket.bind(this.rpcClient.localAddr); } - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookups(); - } - InetSocketAddress remoteAddr = Address.toSocketAddress(remoteId.getAddress()); - if (remoteAddr.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookupsFailed(); - } - throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); - } + InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics); NetUtils.connect(this.socket, remoteAddr, this.rpcClient.connectTO); this.socket.setSoTimeout(this.rpcClient.readTO); return; @@ -374,15 +364,8 @@ private boolean setupSaslConnection(final InputStream in2, final OutputStream ou if (this.metrics != null) { this.metrics.incrNsLookups(); } - InetSocketAddress serverAddr = Address.toSocketAddress(remoteId.getAddress()); - if (serverAddr.isUnresolved()) { - if (this.metrics != null) { - this.metrics.incrNsLookupsFailed(); - } - throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); - } saslRpcClient = new HBaseSaslRpcClient(this.rpcClient.conf, provider, token, - serverAddr.getAddress(), securityInfo, 
this.rpcClient.fallbackAllowed, + socket.getInetAddress(), securityInfo, this.rpcClient.fallbackAllowed, this.rpcClient.conf.get("hbase.rpc.protection", QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 609d2c12ceae..d0a13ca33d6c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -32,17 +32,16 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.BufferCallEvent; import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback; -import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.NettyHBaseRpcConnectionHeaderHandler; import org.apache.hadoop.hbase.security.NettyHBaseSaslRpcClientHandler; import org.apache.hadoop.hbase.security.SaslChallengeDecoder; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; @@ -210,18 +209,9 @@ private void saslNegotiate(final Channel ch) { Promise saslPromise = ch.eventLoop().newPromise(); final NettyHBaseSaslRpcClientHandler saslHandler; try { - if (this.metrics != null) { - this.metrics.incrNsLookups(); - } - InetSocketAddress serverAddr = Address.toSocketAddress(remoteId.getAddress()); - if (serverAddr.isUnresolved()) { - if (this.metrics != null) { - this.metrics.incrNsLookupsFailed(); - } - throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); - } saslHandler = new NettyHBaseSaslRpcClientHandler(saslPromise, ticket, provider, token, - serverAddr.getAddress(), securityInfo, rpcClient.fallbackAllowed, this.rpcClient.conf); + ((InetSocketAddress) ch.remoteAddress()).getAddress(), securityInfo, + rpcClient.fallbackAllowed, this.rpcClient.conf); } catch (IOException e) { failInit(ch, e); return; @@ -282,16 +272,7 @@ public void operationComplete(Future future) throws Exception { private void connect() throws UnknownHostException { assert eventLoop.inEventLoop(); LOG.trace("Connecting to {}", remoteId.getAddress()); - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookups(); - } - InetSocketAddress remoteAddr = Address.toSocketAddress(remoteId.getAddress()); - if (remoteAddr.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookupsFailed(); - } - throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); - } + InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics); this.channel = new Bootstrap().group(eventLoop).channel(rpcClient.channelClass) .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay()) .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive) diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index 6749efe66f3e..b2c7eeae4a5a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -18,12 +18,15 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; @@ -122,7 +125,7 @@ protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, Conne this.remoteId = remoteId; } - protected void scheduleTimeoutTask(final Call call) { + protected final void scheduleTimeoutTask(final Call call) { if (call.timeout > 0) { call.timeoutTask = timeoutTimer.newTimeout(new TimerTask() { @@ -137,7 +140,7 @@ public void run(Timeout timeout) throws Exception { } } - protected byte[] getConnectionHeaderPreamble() { + protected final byte[] getConnectionHeaderPreamble() { // Assemble the preamble up in a buffer first and then send it. Writing individual elements, // they are getting sent across piecemeal according to wireshark and then server is messing // up the reading on occasion (the passed in stream is not buffered yet). @@ -153,7 +156,7 @@ protected byte[] getConnectionHeaderPreamble() { return preamble; } - protected ConnectionHeader getConnectionHeader() { + protected final ConnectionHeader getConnectionHeader() { final ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); builder.setServiceName(remoteId.getServiceName()); final UserInformation userInfoPB = provider.getUserInfo(remoteId.ticket); @@ -176,6 +179,21 @@ protected ConnectionHeader getConnectionHeader() { return builder.build(); } + protected final InetSocketAddress getRemoteInetAddress(MetricsConnection metrics) + throws UnknownHostException { + if (metrics != null) { + metrics.incrNsLookups(); + } + InetSocketAddress remoteAddr = Address.toSocketAddress(remoteId.getAddress()); + if (remoteAddr.isUnresolved()) { + if (metrics != null) { + metrics.incrNsLookupsFailed(); + } + throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); + } + return remoteAddr; + } + protected abstract void callTimeout(Call call); public ConnectionId remoteId() { From 9a72b27f384bc92f930c96d4ab2573152b201d89 Mon Sep 17 00:00:00 2001 From: SWH12 <34267571+SWH12@users.noreply.github.com> Date: Mon, 7 Dec 2020 22:12:56 +0800 Subject: [PATCH 251/769] HBASE-25366 [Documentation] Fix spelling error in sync_replication.adoc(#2744) Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/sync_replication.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/sync_replication.adoc b/src/main/asciidoc/_chapters/sync_replication.adoc index d28b9a9731a3..82b3c7e21780 100644 --- a/src/main/asciidoc/_chapters/sync_replication.adoc +++ b/src/main/asciidoc/_chapters/sync_replication.adoc @@ -99,7 +99,7 @@ hbase> transit_peer_sync_replication_state '1', 'ACTIVE' Case.3 How to operate when 
active cluster crashed:: If the active cluster has been crashed (it may be not reachable now), so let's just transit the standby cluster to -DOWNGRANDE_ACTIVE state, and after that, we should redirect all the requests from client to the DOWNGRADE_ACTIVE cluster. +DOWNGRADE_ACTIVE state, and after that, we should redirect all the requests from client to the DOWNGRADE_ACTIVE cluster. [source,ruby] ---- From fb6e498b32e48aa606ef5427013fd84452cc762f Mon Sep 17 00:00:00 2001 From: Pankaj Date: Mon, 7 Dec 2020 23:00:48 +0530 Subject: [PATCH 252/769] HBASE-25277 postScannerFilterRow impacts Scan performance a lot in HBase 2.x (#2675) * HBASE-25277 postScannerFilterRow impacts Scan performance a lot in HBase 2.x 1. Added a check for Object class in RegionCoprocessorHost to avoid wrong initialization of hasCustomPostScannerFilterRow 2. Removed dummy implementation of postScannerFilterRow from AccessController, VisibilityController & ConstraintProcessor (which are not required currently) Signed-off-by Ramkrishna S Vasudevan Signed-off-by Anoop Sam John Signed-off-by: Duo Zhang --- .../hbase/constraint/ConstraintProcessor.java | 18 ++---- .../regionserver/RegionCoprocessorHost.java | 17 ++++-- .../security/access/AccessController.java | 7 --- .../visibility/VisibilityController.java | 7 --- .../TestRegionCoprocessorHost.java | 57 +++++++++++++++++-- 5 files changed, 69 insertions(+), 37 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index 6aa5d977b678..b0a04c5044ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -22,20 +22,19 @@ import java.util.List; import java.util.Optional; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.yetus.audience.InterfaceAudience; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /*** * Processes multiple {@link Constraint Constraints} on a given table. @@ -98,11 +97,4 @@ public void prePut(ObserverContext e, Put put, } // if we made it here, then the Put is valid } - - @Override - public boolean postScannerFilterRow(final ObserverContext e, - final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { - // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells. 
- return hasMore; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 5ebf7e1c1590..7ed23f695ecd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -79,7 +79,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.org.apache.commons.collections4.map.AbstractReferenceMap; @@ -102,6 +101,13 @@ public class RegionCoprocessorHost // optimization: no need to call postScannerFilterRow, if no coprocessor implements it private final boolean hasCustomPostScannerFilterRow; + /* + * Whether any configured CPs override the postScannerFilterRow hook + */ + public boolean hasCustomPostScannerFilterRow() { + return hasCustomPostScannerFilterRow; + } + /** * * Encapsulation of the environment of each coprocessor @@ -275,11 +281,10 @@ public RegionCoprocessorHost(final HRegion region, out: for (RegionCoprocessorEnvironment env: coprocEnvironments) { if (env.getInstance() instanceof RegionObserver) { Class clazz = env.getInstance().getClass(); - for(;;) { - if (clazz == null) { - // we must have directly implemented RegionObserver - hasCustomPostScannerFilterRow = true; - break out; + for (;;) { + if (clazz == Object.class) { + // we don't need to look for postScannerFilterRow in the Object class + break; // break the inner loop } try { clazz.getDeclaredMethod("postScannerFilterRow", ObserverContext.class, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 3a6c3aae657b..75bc73ccdcd8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1848,13 +1848,6 @@ public void postScannerClose(final ObserverContext scannerOwners.remove(s); } - @Override - public boolean postScannerFilterRow(final ObserverContext e, - final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { - // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells. - return hasMore; - } - /** * Verify, when servicing an RPC, that the caller is the scanner owner.
* If so, we assume that access control is correctly enforced based on diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 37f25a83ea72..7c4b7abb8bff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -678,13 +678,6 @@ private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOExc return PrivateCellUtil.createCell(newCell, tags); } - @Override - public boolean postScannerFilterRow(final ObserverContext e, - final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { - // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells. - return hasMore; - } - /****************************** VisibilityEndpoint service related methods ******************************/ @Override public synchronized void addLabels(RpcController controller, VisibilityLabelsRequest request, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java index 423a412f75c1..b0188d9b7ce5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java @@ -22,11 +22,14 @@ import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR; import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; +import java.util.Optional; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -58,7 +61,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -import java.io.IOException; @Category({SmallTests.class}) public class TestRegionCoprocessorHost { @@ -79,19 +81,36 @@ public class TestRegionCoprocessorHost { @Before public void setup() throws IOException { + init(null); + } + + private void init(Boolean flag) throws IOException { conf = HBaseConfiguration.create(); conf.setBoolean(COPROCESSORS_ENABLED_CONF_KEY, true); conf.setBoolean(USER_COPROCESSORS_ENABLED_CONF_KEY, true); TableName tableName = TableName.valueOf(name.getMethodName()); regionInfo = RegionInfoBuilder.newBuilder(tableName).build(); - // config a same coprocessor with system coprocessor - TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setCoprocessor(SimpleRegionObserver.class.getName()).build(); + TableDescriptor tableDesc = null; + if (flag == null) { + // configure a coprocessor which overrides postScannerFilterRow + tableDesc = TableDescriptorBuilder.newBuilder(tableName) + .setCoprocessor(SimpleRegionObserver.class.getName()).build(); + } else if (flag) { + // configure a coprocessor which doesn't override postScannerFilterRow + tableDesc = TableDescriptorBuilder.newBuilder(tableName) +
.setCoprocessor(TempRegionObserver.class.getName()).build(); + } else { + // configure two coprocessors; one doesn't override postScannerFilterRow but the other does + conf.set(REGION_COPROCESSOR_CONF_KEY, TempRegionObserver.class.getName()); + tableDesc = TableDescriptorBuilder.newBuilder(tableName) + .setCoprocessor(SimpleRegionObserver.class.getName()).build(); + } region = mock(HRegion.class); when(region.getRegionInfo()).thenReturn(regionInfo); when(region.getTableDescriptor()).thenReturn(tableDesc); rsServices = mock(RegionServerServices.class); } + @Test public void testLoadDuplicateCoprocessor() throws Exception { conf.setBoolean(SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR, true); @@ -158,6 +177,27 @@ public void testPreMemStoreCompactionCompactScannerOpen() throws IOException { verifyScanInfo(newScanInfo); } + @Test + public void testPostScannerFilterRow() throws IOException { + // By default SimpleRegionObserver is set as region coprocessor which implements + // postScannerFilterRow + RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf); + assertTrue("Region coprocessor should implement postScannerFilterRow", + host.hasCustomPostScannerFilterRow()); + + // Set a region CP which doesn't implement postScannerFilterRow + init(true); + host = new RegionCoprocessorHost(region, rsServices, conf); + assertFalse("Region coprocessor shouldn't implement postScannerFilterRow", + host.hasCustomPostScannerFilterRow()); + + // Set multiple region CPs, in which one implements postScannerFilterRow + init(false); + host = new RegionCoprocessorHost(region, rsServices, conf); + assertTrue("At least one region coprocessor should implement postScannerFilterRow", + host.hasCustomPostScannerFilterRow()); + } + private void verifyScanInfo(ScanInfo newScanInfo) { assertEquals(KeepDeletedCells.TRUE, newScanInfo.getKeepDeletedCells()); assertEquals(MAX_VERSIONS, newScanInfo.getMaxVersions()); @@ -175,4 +215,13 @@ private ScanInfo getScanInfo() { CellComparator.getInstance(), true); } + /* + * Simple region coprocessor which doesn't override postScannerFilterRow + */ + public static class TempRegionObserver implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + } } From 4b8d3624f4f7059ebcd7e9ea14a3ee3c9fce0ef6 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Tue, 8 Dec 2020 10:41:25 +0800 Subject: [PATCH 253/769] Add niuyulin as committer --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index 05fde4f5453d..deab43804362 100755 --- a/pom.xml +++ b/pom.xml @@ -471,6 +471,12 @@ ndimiduk@apache.org -8 + + niuyulin + Yulin Niu + niuyulin@apache.org + +8 + nkeywal Nicolas Liochon From 979ad0f3fc240c88f746695f7650076ab9cf824b Mon Sep 17 00:00:00 2001 From: XinSun Date: Tue, 8 Dec 2020 10:49:39 +0800 Subject: [PATCH 254/769] Add Xin Sun as a developer --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index deab43804362..fe53e353b68f 100755 --- a/pom.xml +++ b/pom.xml @@ -686,6 +686,12 @@ wangzheng@apache.org +8 + + sunxin + Xin Sun + sunxin@apache.org + +8 + From 5016219d3c6aeb8049c3c6864427a3e653a14a07 Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Tue, 8 Dec 2020 17:58:00 +0530 Subject: [PATCH 255/769] HBASE-25328 : Add builder method to create Tags Closes #2707 Signed-off-by: Anoop Sam John Signed-off-by: Geoffrey Jacoby Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/RawCell.java | 8 ++ .../org/apache/hadoop/hbase/TagBuilder.java | 50 ++++++++++++
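For contrast with the TempRegionObserver helper above, any coprocessor that declares the hook, even as a pass-through, makes hasCustomPostScannerFilterRow() return true. A minimal sketch (not in the patch; the body mirrors the overrides removed from AccessController and VisibilityController earlier in this series):

  public static class OverridingRegionObserver implements RegionCoprocessor, RegionObserver {
    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public boolean postScannerFilterRow(ObserverContext<RegionCoprocessorEnvironment> e,
        InternalScanner s, Cell curRowCell, boolean hasMore) throws IOException {
      return hasMore; // a pass-through override still counts as a custom hook
    }
  }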
.../hadoop/hbase/TagBuilderFactory.java | 73 +++++++++++++++++ .../apache/hadoop/hbase/TestTagBuilder.java | 78 +++++++++++++++++++ 4 files changed, 209 insertions(+) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/TestTagBuilder.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java index ea598d21ca3b..85f8b278de47 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase; import java.util.Iterator; +import java.util.List; import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; @@ -64,4 +65,11 @@ public static void checkForTagsLength(int tagsLength) { throw new IllegalArgumentException("tagslength " + tagsLength + " > " + MAX_TAGS_LENGTH); } } + + /** + * @return A new cell with the extra tags added to it. + */ + public static Cell createCell(Cell cell, List tags) { + return PrivateCellUtil.createCell(cell, tags); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java new file mode 100644 index 000000000000..372144c6c268 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java @@ -0,0 +1,50 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Builder interface to create {@link Tag} + * Call setTagValue(byte[]) method to create {@link ArrayBackedTag} + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +public interface TagBuilder { + /** + * Set type of the tag. + * @param tagType type of the tag + * @return {@link TagBuilder} + */ + TagBuilder setTagType(byte tagType); + + /** + * Set the value of the tag. + * @param tagBytes tag bytes. + * @return {@link TagBuilder} + */ + TagBuilder setTagValue(byte[] tagBytes); + + /** + * Build the tag.
+ * @return {@link Tag} + */ + Tag build(); +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java new file mode 100644 index 000000000000..40744f91abf0 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java @@ -0,0 +1,73 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Factory to create Tags. + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +public final class TagBuilderFactory { + + public static TagBuilder create() { + return new TagBuilderImpl(); + } +} + +/** + * Builder implementation to create {@link Tag}
+ * Call setTagValue(byte[]) method to create {@link ArrayBackedTag} + */ +class TagBuilderImpl implements TagBuilder { + // This assumes that we never create a tag whose type is negative. + private byte tagType = (byte)-1; + private byte[] tagBytes = null; + public static final String TAG_TYPE_NOT_SET_EXCEPTION = "Need to set type of the tag."; + public static final String TAG_VALUE_NULL_EXCEPTION = "TagBytes can't be null"; + + @Override + public TagBuilder setTagType(byte tagType) { + this.tagType = tagType; + return this; + } + + @Override + public TagBuilder setTagValue(byte[] tagBytes) { + this.tagBytes = tagBytes; + return this; + } + + private void validate() { + if (tagType == -1) { + throw new IllegalArgumentException(TAG_TYPE_NOT_SET_EXCEPTION); + } + if (tagBytes == null) { + throw new IllegalArgumentException(TAG_VALUE_NULL_EXCEPTION); + } + } + + @Override + public Tag build() { + validate(); + return new ArrayBackedTag(tagType, tagBytes); + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTagBuilder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTagBuilder.java new file mode 100644 index 000000000000..b50aa2df645b --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTagBuilder.java @@ -0,0 +1,78 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.nio.ByteBuffer; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MiscTests.class, SmallTests.class}) +public class TestTagBuilder { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTagBuilder.class); + + @Test + public void testArrayBackedTagBuilder() { + byte type = (byte)50; + String value = "Array-Backed-Tag"; + TagBuilder builder = TagBuilderFactory.create(); + assertTrue(builder instanceof TagBuilderImpl); + builder.setTagType(type); + builder.setTagValue(Bytes.toBytes(value)); + Tag tag = builder.build(); + assertEquals(value, Tag.getValueAsString(tag)); + assertEquals(type, tag.getType()); + } + + @Test + public void testErrorMessages() { + String arrayValue = "Array-Backed-Tag"; + TagBuilder builder = TagBuilderFactory.create(); + builder.setTagValue(Bytes.toBytes(arrayValue)); + try { + // Don't set type for the tag.
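  // Aside: the happy path of this builder API, condensed from
  // testArrayBackedTagBuilder above (a usage sketch, not extra test code):
  //   Tag tag = TagBuilderFactory.create()
  //       .setTagType((byte) 50)
  //       .setTagValue(Bytes.toBytes("Array-Backed-Tag"))
  //       .build();
  // RawCell.createCell(cell, Arrays.asList(tag)) then attaches such tags
  // to an existing cell, per the RawCell change earlier in this patch.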
+ builder.build(); + fail("Shouldn't have come here."); + } catch(IllegalArgumentException iae) { + assertTrue(iae.getMessage().contains(TagBuilderImpl.TAG_TYPE_NOT_SET_EXCEPTION)); + } + + byte type = (byte)50; + builder = TagBuilderFactory.create(); + builder.setTagType(type); + try { + // Need to call setTagValue(byte[]) to set the value. + builder.build(); + fail("Shouldn't have come here."); + } catch(IllegalArgumentException iae) { + assertTrue(iae.getMessage().contains(TagBuilderImpl.TAG_VALUE_NULL_EXCEPTION)); + } + } +} From 56dd3eba81bd5d034aa6fb33e488116573306f9e Mon Sep 17 00:00:00 2001 From: haxiaolin Date: Tue, 8 Dec 2020 17:21:16 +0800 Subject: [PATCH 256/769] HBASE-25363 Improve performance of HFileLinkCleaner by using ReadWriteLock instead of synchronize --- .../master/cleaner/HFileLinkCleaner.java | 95 +++++++++++-------- 1 file changed, 55 insertions(+), 40 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java index a99c784d2ac8..b19e174be0c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.cleaner; import java.io.IOException; +import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -44,63 +45,75 @@ public class HFileLinkCleaner extends BaseHFileCleanerDelegate { private static final Logger LOG = LoggerFactory.getLogger(HFileLinkCleaner.class); private FileSystem fs = null; + private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); @Override - public synchronized boolean isFileDeletable(FileStatus fStat) { - if (this.fs == null) return false; - Path filePath = fStat.getPath(); - // HFile Link is always deletable - if (HFileLink.isHFileLink(filePath)) return true; + public boolean isFileDeletable(FileStatus fStat) { + lock.readLock().lock(); + try { + if (this.fs == null) { + return false; + } + Path filePath = fStat.getPath(); + // HFile Link is always deletable + if (HFileLink.isHFileLink(filePath)) { + return true; + } - // If the file is inside a link references directory, means that it is a back ref link. - // The back ref can be deleted only if the referenced file doesn't exists. - Path parentDir = filePath.getParent(); - if (HFileLink.isBackReferencesDir(parentDir)) { - Path hfilePath = null; - try { - // Also check if the HFile is in the HBASE_TEMP_DIRECTORY; this is where the referenced - // file gets created when cloning a snapshot. - hfilePath = HFileLink.getHFileFromBackReference( - new Path(CommonFSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY), filePath); - if (fs.exists(hfilePath)) { - return false; - } - // check whether the HFileLink still exists in mob dir. - hfilePath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(getConf()), filePath); - if (fs.exists(hfilePath)) { + // If the file is inside a link references directory, it means that it is a back ref link. + // The back ref can be deleted only if the referenced file doesn't exist.
+ Path parentDir = filePath.getParent(); + if (HFileLink.isBackReferencesDir(parentDir)) { + Path hfilePath = null; + try { + // Also check if the HFile is in the HBASE_TEMP_DIRECTORY; this is where the referenced + // file gets created when cloning a snapshot. + hfilePath = HFileLink.getHFileFromBackReference(new Path( + CommonFSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY), filePath); + if (fs.exists(hfilePath)) { + return false; + } + // check whether the HFileLink still exists in mob dir. + hfilePath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(getConf()), filePath); + if (fs.exists(hfilePath)) { + return false; + } + hfilePath = HFileLink.getHFileFromBackReference(CommonFSUtils.getRootDir(getConf()), + filePath); + return !fs.exists(hfilePath); + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Couldn't verify if the referenced file still exists, keep it just in case: " + + hfilePath); + } return false; } - hfilePath = - HFileLink.getHFileFromBackReference(CommonFSUtils.getRootDir(getConf()), filePath); - return !fs.exists(hfilePath); + } + + // HFile is deletable only if it has no links + Path backRefDir = null; + try { + backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName()); + return CommonFSUtils.listStatus(fs, backRefDir) == null; } catch (IOException e) { if (LOG.isDebugEnabled()) { - LOG.debug("Couldn't verify if the referenced file still exists, keep it just in case: " + - hfilePath); + LOG.debug( + "Couldn't get the references, not deleting file, just in case. filePath=" + + filePath + ", backRefDir=" + backRefDir); } return false; - } - - // HFile is deletable only if has no links - Path backRefDir = null; - try { - backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName()); - return CommonFSUtils.listStatus(fs, backRefDir) == null; - } catch (IOException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Couldn't get the references, not deleting file, just in case. filePath=" - + filePath + ", backRefDir=" + backRefDir); - } - return false; + } finally { + lock.readLock().unlock(); } } @Override - public synchronized void setConf(Configuration conf) { + public void setConf(Configuration conf) { super.setConf(conf); // setup filesystem + lock.writeLock().lock(); try { this.fs = FileSystem.get(this.getConf()); } catch (IOException e) { @@ -109,6 +122,8 @@ public synchronized void setConf(Configuration conf) { + FileSystem.FS_DEFAULT_NAME_KEY + "=" + getConf().get(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS)); } + } finally { + lock.writeLock().unlock(); } } } From fbe338de1a0b2b19ff9c4b7ec5180cf94b40f28e Mon Sep 17 00:00:00 2001 From: z-york Date: Tue, 8 Dec 2020 23:34:18 -0800 Subject: [PATCH 257/769] HBASE-25362 Fix quoting in hbase-vote to avoid voting build failures (#2737) Signed-off-by: Nick Dimiduk Signed-off-by: Stephen Wu --- dev-support/hbase-vote.sh | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/dev-support/hbase-vote.sh b/dev-support/hbase-vote.sh index d608f1e5e4a4..11267757b253 100755 --- a/dev-support/hbase-vote.sh +++ b/dev-support/hbase-vote.sh @@ -40,10 +40,13 @@ Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file- https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target -P | list of maven profiles to activate for test UT/IT, i.e.
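Reduced to its skeleton, the locking change above looks like the sketch below (illustrative, not the full cleaner): many cleaner threads can hold the read lock concurrently in isFileDeletable, and only the rare setConf reconfiguration takes the exclusive write lock, where the old code serialized every check behind synchronized.

  class LockSkeleton {
    private final java.util.concurrent.locks.ReentrantReadWriteLock lock =
      new java.util.concurrent.locks.ReentrantReadWriteLock();
    private Object fs; // stands in for the guarded FileSystem field

    boolean isFileDeletable() { // hot path: shared access
      lock.readLock().lock();
      try {
        return fs != null; // plus the real deletability checks
      } finally {
        lock.readLock().unlock();
      }
    }

    void setConf(Object newFs) { // cold path: exclusive access
      lock.writeLock().lock();
      try {
        fs = newFs;
      } finally {
        lock.writeLock().unlock();
      }
    }
  }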
<-P runSmallTests> Defaults to runAllTests - -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0> Defaults to unset + -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0 -D skipTests> Defaults to unset __EOF } +MVN_PROFILES=() +MVN_PROPERTIES=() + while ((${#})); do case "${1}" in -h | --help ) @@ -57,9 +60,9 @@ while ((${#})); do -o | --output-dir ) OUTPUT_DIR="${2}"; shift 2 ;; -P ) - MVN_PROFILES="-P ${2}"; shift 2 ;; + MVN_PROFILES+=("-P ${2}"); shift 2 ;; -D ) - MVN_PROPERTIES="-D ${2}"; shift 2 ;; + MVN_PROPERTIES+=("-D ${2}"); shift 2 ;; * ) usage >&2; exit 1 ;; esac @@ -92,8 +95,8 @@ if [ ! -d "${OUTPUT_DIR}" ]; then fi # Maven profile must be provided -if [ -z "${MVN_PROFILES}" ]; then - MVN_PROFILES="-P runAllTests" +if [ ${#MVN_PROFILES[@]} -eq 0 ]; then + MVN_PROFILES=("-P runAllTests") fi OUTPUT_PATH_PREFIX="${OUTPUT_DIR}"/"${HBASE_RC_VERSION}" @@ -145,17 +148,18 @@ function unzip_from_source() { function rat_test() { rm -f "${OUTPUT_PATH_PREFIX}"_rat_test - mvn clean apache-rat:check "${MVN_PROPERTIES}" 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 + mvn clean apache-rat:check "${MVN_PROPERTIES[@]}" 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 } function build_from_source() { rm -f "${OUTPUT_PATH_PREFIX}"_build_from_source - mvn clean install "${MVN_PROPERTIES}" -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 + # Hardcode skipTests for faster build. Testing is covered later. + mvn clean install "${MVN_PROPERTIES[@]}" -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 } function run_tests() { rm -f "${OUTPUT_PATH_PREFIX}"_run_tests - mvn package "${MVN_PROFILES}" "${MVN_PROPERTIES}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 + mvn package "${MVN_PROFILES[@]}" "${MVN_PROPERTIES[@]}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 } function execute() { @@ -167,11 +171,11 @@ function print_when_exit() { * Signature: $( ((SIGNATURE_PASSED)) && echo "ok" || echo "failed" ) * Checksum : $( ((CHECKSUM_PASSED)) && echo "ok" || echo "failed" ) * Rat check (${JAVA_VERSION}): $( ((RAT_CHECK_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean apache-rat:check "${MVN_PROPERTIES}" + - mvn clean apache-rat:check ${MVN_PROPERTIES[@]} * Built from source (${JAVA_VERSION}): $( ((BUILD_FROM_SOURCE_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean install -DskipTests "${MVN_PROPERTIES}" + - mvn clean install ${MVN_PROPERTIES[@]} -DskipTests * Unit tests pass (${JAVA_VERSION}): $( ((UNIT_TEST_PASSED)) && echo "ok" || echo "failed" ) - - mvn package ${MVN_PROFILES} "${MVN_PROPERTIES}" -Dsurefire.rerunFailingTestsCount=3 + - mvn package ${MVN_PROFILES[@]} ${MVN_PROPERTIES[@]} -Dsurefire.rerunFailingTestsCount=3 __EOF if ((CHECKSUM_PASSED)) && ((SIGNATURE_PASSED)) && ((RAT_CHECK_PASSED)) && ((BUILD_FROM_SOURCE_PASSED)) && ((UNIT_TEST_PASSED)) ; then exit 0 From c853c99b209cb2dfefb8d15595d9a9a5469e24fa Mon Sep 17 00:00:00 2001 From: lixiaobao Date: Wed, 9 Dec 2020 21:34:36 +0800 Subject: [PATCH 258/769] HBASE-25372 Fix typo in ban-jersey section of the enforcer plugin in pom.xml (#2749) Signed-off-by: Wei-Chiu Chuang Signed-off-by: Duo Zhang --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index fe53e353b68f..feeeb7f7f49b 
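The heart of the hbase-vote.sh change above is switching accumulated flags from scalar strings to bash arrays. A standalone illustration of the difference (not from the patch):

  # A quoted scalar expands as ONE word, so mvn would receive the single,
  # invalid argument "-P runAllTests":
  opts="-P runAllTests"
  printf '[%s]\n' "$opts"        # -> [-P runAllTests]

  # A quoted array expansion keeps each element a separate, safely quoted word:
  opts=(-P runAllTests)
  printf '[%s]\n' "${opts[@]}"   # -> [-P] then [runAllTests]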
100755 --- a/pom.xml +++ b/pom.xml @@ -1200,8 +1200,8 @@ - >org.glassfish.jersey.containers:** - >org.glassfish.jersey.core:** + org.glassfish.jersey.containers:** + org.glassfish.jersey.core:** Use shaded jersey instead From c1aa3b24e930e2c47ff4d7f6e286cb450458dffc Mon Sep 17 00:00:00 2001 From: Huaxiang Sun Date: Mon, 30 Nov 2020 15:58:25 -0800 Subject: [PATCH 259/769] HBASE-25293 Followup jira to address the client handling issue when changing from meta replica to non-meta-replica at the server side. --- .../client/AsyncNonMetaRegionLocator.java | 2 +- .../CatalogReplicaLoadBalanceSelector.java | 2 + ...talogReplicaLoadBalanceSimpleSelector.java | 19 ++- ...talogReplicaLoadBalanceSimpleSelector.java | 132 ++++++++++++++++++ 4 files changed, 144 insertions(+), 11 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 2c2520f8bd12..1c686aca8b76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -211,7 +211,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = 1; + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index c3ce868757f1..27be88a9def2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -28,6 +28,8 @@ @InterfaceAudience.Private interface CatalogReplicaLoadBalanceSelector { + int UNINITIALIZED_NUM_OF_REPLICAS = -1; + /** * This method is called when input location is stale, i.e, when clients run into * org.apache.hadoop.hbase.NotServingRegionException. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index bc8264050149..01996b34e2ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -108,7 +108,6 @@ public String toString() { private final TableName tableName; private final IntSupplier getNumOfReplicas; private volatile boolean isStopped = false; - private final static int UNINITIALIZED_NUM_OF_REPLICAS = -1; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, IntSupplier getNumOfReplicas) { @@ -117,7 +116,7 @@ public String toString() { this.getNumOfReplicas = getNumOfReplicas; // This numOfReplicas is going to be lazy initialized.
- this.numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS; + this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; // Start chores this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); @@ -146,7 +145,7 @@ public void onError(HRegionLocation loc) { */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; - if (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) { + if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } @@ -262,16 +261,16 @@ private void cleanupReplicaReplicaStaleCache() { private int refreshCatalogReplicaCount() { int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); LOG.debug("Refreshed replica count {}", newNumOfReplicas); - if (newNumOfReplicas == 1) { - LOG.warn("Table {}'s region replica count is 1, maybe a misconfiguration or failure to " - + "fetch the replica count", tableName); + // If the returned number of replicas is -1, it is caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. + if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + LOG.error("Failed to fetch Table {}'s region replica count", tableName); + return this.numOfReplicas; } - int cachedNumOfReplicas = this.numOfReplicas; - // If the returned number of replicas is 1, it is mostly caused by failure to fetch the - // replica count. Do not update the numOfReplicas in this case. + int cachedNumOfReplicas = this.numOfReplicas; if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - ((cachedNumOfReplicas != newNumOfReplicas) && (newNumOfReplicas != 1))) { + (cachedNumOfReplicas != newNumOfReplicas)) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java new file mode 100644 index 000000000000..6b14286f99ca --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
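Condensed, the contract this patch establishes is that -1 (UNINITIALIZED_NUM_OF_REPLICAS) means "fetch failed" end to end, so a failure can no longer masquerade as a legitimate single-replica setup. A sketch of the supplier side, following the AsyncNonMetaRegionLocator hunk above (timeoutNs is a stand-in for the configured read RPC timeout):

  IntSupplier replicaCount = () -> {
    try {
      // the real replica count; 1 is now a valid, trusted answer
      return conn.registry.getMetaRegionLocations()
          .get(timeoutNs, TimeUnit.NANOSECONDS).size();
    } catch (Exception e) {
      // -1, never 1: a failed fetch must not look like a valid configuration
      return CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
    }
  };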
+ */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; +import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ MediumTests.class, ClientTests.class }) +public class TestCatalogReplicaLoadBalanceSimpleSelector { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); + + private static final Logger LOG = LoggerFactory.getLogger( + TestCatalogReplicaLoadBalanceSimpleSelector.class); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final int NB_SERVERS = 4; + private static int numOfMetaReplica = NB_SERVERS - 1; + + private static AsyncConnectionImpl CONN; + + private static ConnectionRegistry registry; + private static Admin admin; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + + TEST_UTIL.startMiniCluster(NB_SERVERS); + admin = TEST_UTIL.getAdmin(); + admin.balancerSwitch(false, true); + + // Enable hbase:meta replication. + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( + TableName.META_TABLE_NAME).size() >= numOfMetaReplica); + + registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + CONN = new AsyncConnectionImpl(conf, registry, + registry.getClusterId().get(), null, User.getCurrent()); + } + + @AfterClass + public static void tearDown() throws Exception { + IOUtils.closeQuietly(CONN); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMetaChangeFromReplicaNoReplica() throws IOException, InterruptedException { + String replicaSelectorClass = CONN.getConfiguration(). 
+ get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, + CatalogReplicaLoadBalanceSimpleSelector.class.getName()); + + CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory + .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get + (CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + + assertNotEquals( + metaSelector.select(TableName.valueOf("test"), EMPTY_START_ROW, RegionLocateType.CURRENT), + RegionReplicaUtil.DEFAULT_REPLICA_ID); + + // Change to No meta replica + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 1); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( + TableName.META_TABLE_NAME).size() == 1); + + CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = + CatalogReplicaLoadBalanceSelectorFactory.createSelector( + replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get( + CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + assertEquals( + metaSelectorWithNoReplica.select(TableName.valueOf("test"), EMPTY_START_ROW, + RegionLocateType.CURRENT), RegionReplicaUtil.DEFAULT_REPLICA_ID); + } +} From 78514383795b35b0eed494644c6ef8934bc8a9c0 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 9 Dec 2020 12:18:08 -0800 Subject: [PATCH 260/769] HBASE-25376 [create-release] Fix double .asc (#2755) Signed-off-by: Andrew Purtell --- dev-support/create-release/release-util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 4dffd672c2ae..f1f732a7727c 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -685,7 +685,7 @@ function maven_deploy { #inputs: maven_set_version "$RELEASE_VERSION" # Prepare for signing kick_gpg_agent - declare -a mvn_goals=(clean install) + declare -a mvn_goals=(clean) if ! is_dry_run; then mvn_goals=("${mvn_goals[@]}" deploy) fi From 7a532f83286e4125df3cfe7adc9822c267f36250 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 9 Dec 2020 16:54:18 -0800 Subject: [PATCH 261/769] HBASE-25380 [create-release] Add timestamping to log output (#2758) Added logging of timestamp so we can tell where we are spending time. Added context to the README copied from head of entrance script. 
Signed-off-by: Andrew Purtell --- dev-support/create-release/README.txt | 32 ++++++++-- .../create-release/do-release-docker.sh | 30 +++++----- dev-support/create-release/release-build.sh | 30 +++++----- dev-support/create-release/release-util.sh | 59 ++++++++++--------- 4 files changed, 90 insertions(+), 61 deletions(-) diff --git a/dev-support/create-release/README.txt b/dev-support/create-release/README.txt index e696574507f9..aff562445e12 100644 --- a/dev-support/create-release/README.txt +++ b/dev-support/create-release/README.txt @@ -1,7 +1,31 @@ -Entrance script is _do-release-docker.sh_. Requires a local docker; -for example, on mac os x, Docker for Desktop installed and running. - -For usage, pass '-h': +Creates an HBase release candidate. The script will update versions, tag the branch, +build HBase binary packages and documentation, and upload maven artifacts to a staging +repository. There is also a dry run mode where only local builds are performed, and +nothing is uploaded to the ASF repos. + +Run with "-h" for options. For example, running below will do all +steps above using the 'rm' dir under Downloads as workspace: + + $ ./do-release-docker.sh -d ~/Downloads/rm + +The scripts in this directory came originally from spark +(https://github.com/apache/spark/tree/master/dev/create-release). They were then +modified to suit the hbase context. These scripts supersede the old +../make_rc.sh script for making release candidates because what is here is more +comprehensive, doing more steps of the RM process as well as running in a +container so the RM build environment can be a constant. + +It: + + * Tags release + * Sets version to the release version + * Sets version to next SNAPSHOT version. + * Builds, signs, and hashes all artifacts. + * Pushes release tgzs to the dev dir in an apache dist. + * Pushes to repository.apache.org staging. + +The entry point is here, in the do-release-docker.sh script. Requires a local +docker; for example, on mac os x, Docker for Desktop installed and running. $ ./do-release-docker.sh -h diff --git a/dev-support/create-release/do-release-docker.sh b/dev-support/create-release/do-release-docker.sh index b48581f9165b..cda814cfbf1b 100755 --- a/dev-support/create-release/do-release-docker.sh +++ b/dev-support/create-release/do-release-docker.sh @@ -76,7 +76,7 @@ Options: -s [step] runs a single step of the process; valid steps are: tag|publish-dist|publish-release. If none specified, runs tag, then publish-dist, and then publish-release. 'publish-snapshot' is also an allowed, less used, option. - -x debug. do less clean up. (env file, gpg forwarding on mac) + -x debug. Does less clean up (env file, gpg forwarding on mac) EOF exit 1 } @@ -147,7 +147,7 @@ done # We need to import that public key in the container in order to use the private key via the agent.
GPG_KEY_FILE="$WORKDIR/gpg.key.public" -echo "Exporting public key for ${GPG_KEY}" +log "Exporting public key for ${GPG_KEY}" fcreate_secure "$GPG_KEY_FILE" $GPG "${GPG_ARGS[@]}" --export "${GPG_KEY}" > "${GPG_KEY_FILE}" @@ -155,10 +155,10 @@ function cleanup { local id banner "Release Cleanup" if is_debug; then - echo "skipping due to debug run" + log "skipping due to debug run" return 0 fi - echo "details in cleanup.log" + log "details in cleanup.log" if [ -f "${ENVFILE}" ]; then rm -f "$ENVFILE" fi @@ -186,7 +186,7 @@ function cleanup { trap cleanup EXIT -echo "Host OS: ${HOST_OS}" +log "Host OS: ${HOST_OS}" if [ "${HOST_OS}" == "DARWIN" ]; then run_silent "Building gpg-agent-proxy image with tag ${IMGTAG}..." "docker-proxy-build.log" \ docker build --build-arg "UID=${UID}" --build-arg "RM_USER=${USER}" \ @@ -198,7 +198,7 @@ run_silent "Building hbase-rm image with tag $IMGTAG..." "docker-build.log" \ --build-arg "RM_USER=${USER}" "$SELF/hbase-rm" banner "Final prep for container launch." -echo "Writing out environment for container." +log "Writing out environment for container." # Write the release information to a file with environment variables to be used when running the # image. ENVFILE="$WORKDIR/env.list" @@ -244,7 +244,7 @@ if [ -n "${GIT_REPO}" ]; then ;; # on the host but normally git wouldn't use the local optimization file://*) - echo "[INFO] converted file:// git repo to a local path, which changes git to assume --local." + log "Converted file:// git repo to a local path, which changes git to assume --local." GIT_REPO_MOUNT=(--mount "type=bind,src=${GIT_REPO#file://},dst=/opt/hbase-repo,consistency=delegated") echo "HOST_GIT_REPO=${GIT_REPO}" >> "${ENVFILE}" GIT_REPO="/opt/hbase-repo" @@ -286,8 +286,8 @@ fi GPG_PROXY_MOUNT=() if [ "${HOST_OS}" == "DARWIN" ]; then GPG_PROXY_MOUNT=(--mount "type=volume,src=gpgagent,dst=/home/${USER}/.gnupg/") - echo "Setting up GPG agent proxy container needed on OS X." - echo " we should clean this up for you. If that fails the container ID is below and in " \ + log "Setting up GPG agent proxy container needed on OS X." + log " we should clean this up for you. If that fails the container ID is below and in " \ "gpg-proxy.cid" #TODO the key pair used should be configurable docker run --rm -p 62222:22 \ @@ -301,8 +301,8 @@ if [ "${HOST_OS}" == "DARWIN" ]; then sort "${HOME}/.ssh/known_hosts" | comm -1 -3 - "${WORKDIR}/gpg-agent-proxy.ssh-keyscan" \ > "${WORKDIR}/gpg-agent-proxy.known_hosts" if [ -s "${WORKDIR}/gpg-agent-proxy.known_hosts" ]; then - echo "Your ssh known_hosts does not include the entries for the gpg-agent proxy container." - echo "The following entry(ies) are missing:" + log "Your ssh known_hosts does not include the entries for the gpg-agent proxy container." + log "The following entry(ies) are missing:" sed -e 's/^/ /' "${WORKDIR}/gpg-agent-proxy.known_hosts" read -r -p "Okay to add these entries to ${HOME}/.ssh/known_hosts? [y/n] " ANSWER if [ "$ANSWER" != "y" ]; then @@ -310,8 +310,8 @@ if [ "${HOST_OS}" == "DARWIN" ]; then fi cat "${WORKDIR}/gpg-agent-proxy.known_hosts" >> "${HOME}/.ssh/known_hosts" fi - echo "Launching ssh reverse tunnel from the container to gpg agent." - echo " we should clean this up for you. If that fails the PID is in gpg-proxy.ssh.pid" + log "Launching ssh reverse tunnel from the container to gpg agent." + log " we should clean this up for you. 
If that fails the PID is in gpg-proxy.ssh.pid" ssh -p 62222 -R "/home/${USER}/.gnupg/S.gpg-agent:$(gpgconf --list-dir agent-extra-socket)" \ -i "${HOME}/.ssh/id_rsa" -N -n localhost >gpg-proxy.ssh.log 2>&1 & echo $! > "${WORKDIR}/gpg-proxy.ssh.pid" @@ -326,10 +326,10 @@ else fi banner "Building $RELEASE_TAG; output will be at $WORKDIR/output" -echo "We should clean the container up when we are done. If that fails then the container ID " \ +log "We should clean the container up when we are done. If that fails then the container ID " \ "is in release.cid" echo -# Where possible we specifcy "consistency=delegated" when we do not need host access during the +# Where possible we specify "consistency=delegated" when we do not need host access during the # build run. On Mac OS X specifically this gets us a big perf improvement. cmd=(docker run --rm -ti \ --env-file "$ENVFILE" \ diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index db28f6f08b42..44a594fff3d6 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -81,7 +81,7 @@ set -e function cleanup { # If REPO was set, then leave things be. Otherwise if we defined a repo clean it out. if [[ -z "${REPO}" ]] && [[ -n "${MAVEN_LOCAL_REPO}" ]]; then - echo "Cleaning up temp repo in '${MAVEN_LOCAL_REPO}'. Set REPO to reuse downloads." >&2 + log "Cleaning up temp repo in '${MAVEN_LOCAL_REPO}'. Set REPO to reuse downloads." >&2 rm -f "${MAVEN_SETTINGS_FILE}" &> /dev/null || true rm -rf "${MAVEN_LOCAL_REPO}" &> /dev/null || true fi @@ -142,7 +142,7 @@ if [[ "$1" == "tag" ]]; then git add RELEASENOTES.md CHANGES.md git commit -a -m "Preparing ${PROJECT} release $RELEASE_TAG; tagging and updates to CHANGES.md and RELEASENOTES.md" - echo "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" + log "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" git tag "$RELEASE_TAG" # Create next version @@ -159,7 +159,7 @@ if [[ "$1" == "tag" ]]; then else cd .. mv "${PROJECT}" "${PROJECT}.tag" - echo "Dry run: Clone with version changes and tag available as ${PROJECT}.tag in the output directory." + log "Dry run: Clone with version changes and tag available as ${PROJECT}.tag in the output directory." fi exit 0 fi @@ -186,7 +186,7 @@ fi cd "${PROJECT}" git checkout "$GIT_REF" git_hash="$(git rev-parse --short HEAD)" -echo "Checked out ${PROJECT} at ${GIT_REF} commit $git_hash" +log "Checked out ${PROJECT} at ${GIT_REF} commit $git_hash" if [ -z "${RELEASE_VERSION}" ]; then RELEASE_VERSION="$(maven_get_version)" @@ -210,7 +210,7 @@ cd .. if [[ "$1" == "publish-dist" ]]; then # Source and binary tarballs - echo "Packaging release source tarballs" + log "Packaging release source tarballs" make_src_release "${PROJECT}" "${RELEASE_VERSION}" # we do not have binary tarballs for hbase-thirdparty @@ -228,7 +228,7 @@ if [[ "$1" == "publish-dist" ]]; then rm -rf "${svn_target:?}/${DEST_DIR_NAME}" mkdir -p "$svn_target/${DEST_DIR_NAME}" - echo "Copying release tarballs" + log "Copying release tarballs" cp "${PROJECT}"-*.tar.* "$svn_target/${DEST_DIR_NAME}/" cp "${PROJECT}/CHANGES.md" "$svn_target/${DEST_DIR_NAME}/" cp "${PROJECT}/RELEASENOTES.md" "$svn_target/${DEST_DIR_NAME}/" @@ -241,6 +241,7 @@ if [[ "$1" == "publish-dist" ]]; then fi shopt -u nocasematch + log "svn add" svn add "$svn_target/${DEST_DIR_NAME}" if ! 
is_dry_run; then @@ -250,9 +251,10 @@ if [[ "$1" == "publish-dist" ]]; then rm -rf "$svn_target" else mv "$svn_target/${DEST_DIR_NAME}" "${svn_target}_${DEST_DIR_NAME}.dist" - echo "Dry run: svn-managed 'dist' directory with release tarballs, CHANGES.md and RELEASENOTES.md available as $(pwd)/${svn_target}_${DEST_DIR_NAME}.dist" + log "Dry run: svn-managed 'dist' directory with release tarballs, CHANGES.md and RELEASENOTES.md available as $(pwd)/${svn_target}_${DEST_DIR_NAME}.dist" rm -rf "$svn_target" fi + log "svn ci done" exit 0 fi @@ -261,13 +263,13 @@ if [[ "$1" == "publish-snapshot" ]]; then ( cd "${PROJECT}" mvn_log="${BASE_DIR}/mvn_deploy_snapshot.log" - echo "Publishing snapshot to nexus" + log "Publishing snapshot to nexus" maven_deploy snapshot "$mvn_log" if ! is_dry_run; then - echo "Snapshot artifacts successfully published to repo." + log "Snapshot artifacts successfully published to repo." rm "$mvn_log" else - echo "Dry run: Snapshot artifacts successfully built, but not published due to dry run." + log "Dry run: Snapshot artifacts successfully built, but not published due to dry run." fi ) exit $? @@ -277,16 +279,16 @@ if [[ "$1" == "publish-release" ]]; then ( cd "${PROJECT}" mvn_log="${BASE_DIR}/mvn_deploy_release.log" - echo "Staging release in nexus" + log "Staging release in nexus" maven_deploy release "$mvn_log" declare staged_repo_id="dryrun-no-repo" if ! is_dry_run; then staged_repo_id=$(grep -o "Closing staging repository with ID .*" "$mvn_log" \ | sed -e 's/Closing staging repository with ID "\([^"]*\)"./\1/') - echo "Release artifacts successfully published to repo ${staged_repo_id}" + log "Release artifacts successfully published to repo ${staged_repo_id}" rm "$mvn_log" else - echo "Dry run: Release artifacts successfully built, but not published due to dry run." + log "Dry run: Release artifacts successfully built, but not published due to dry run." fi # Dump out email to send. Where we find vote.tmpl depends # on where this script is run from @@ -300,5 +302,5 @@ fi set +x # done with detailed logging cd .. rm -rf "${PROJECT}" -echo "ERROR: expects to be called with 'tag', 'publish-dist', 'publish-release', or 'publish-snapshot'" >&2 +log "ERROR: expects to be called with 'tag', 'publish-dist', 'publish-release', or 'publish-snapshot'" >&2 exit_with_usage diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index f1f732a7727c..5f7224fded6b 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -29,7 +29,7 @@ PUBLISH_PROFILES=("-P" "apache-release,release") set -e function error { - echo "Error: $*" >&2 + log "Error: $*" >&2 exit 1 } @@ -54,10 +54,14 @@ function parse_version { function banner { local msg="$1" echo "========================" - echo "=== ${msg}" + log "${msg}" echo } +function log { + echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") ${1}" +} + # current number of seconds since epoch function get_ctime { date +"%s" @@ -71,17 +75,17 @@ function run_silent { local -i stop_time banner "${BANNER}" - echo "Command: $*" - echo "Log file: $LOG_FILE" + log "Command: $*" + log "Log file: $LOG_FILE" start_time="$(get_ctime)" if ! "$@" 1>"$LOG_FILE" 2>&1; then - echo "Command FAILED. Check full logs for details." + log "Command FAILED. Check full logs for details." 
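With the log helper defined above, every status line carries a UTC timestamp, which is what makes the per-step timing in this patch readable. Illustrative output (not captured from a real run):

  $ log "Packaging release source tarballs"
  2020-12-09T20:15:01Z Packaging release source tarballs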
tail "$LOG_FILE" exit 1 fi stop_time="$(get_ctime)" - echo "=== SUCCESS ($((stop_time - start_time)) seconds)" + log "SUCCESS ($((stop_time - start_time)) seconds)" } function fcreate_secure { @@ -147,7 +151,7 @@ function get_release_info { local version version="$(curl -s "$ASF_REPO_WEBUI;a=blob_plain;f=pom.xml;hb=refs/heads/$GIT_BRANCH" | parse_version)" - echo "Current branch VERSION is $version." + log "Current branch VERSION is $version." NEXT_VERSION="$version" RELEASE_VERSION="" @@ -199,7 +203,7 @@ function get_release_info { if git ls-remote --tags "$ASF_REPO" "$RELEASE_TAG" | grep -q "refs/tags/${RELEASE_TAG}$" ; then read -r -p "$RELEASE_TAG already exists. Continue anyway [y/n]? " ANSWER if [ "$ANSWER" != "y" ]; then - echo "Exiting." + log "Exiting." exit 1 fi SKIP_TAG=1 @@ -209,7 +213,7 @@ function get_release_info { GIT_REF="$RELEASE_TAG" if is_dry_run; then - echo "This is a dry run. If tag does not actually exist, please confirm the ref that will be built for testing." + log "This is a dry run. If tag does not actually exist, please confirm the ref that will be built for testing." GIT_REF="$(read_config "GIT_REF" "$GIT_REF")" fi export GIT_REF @@ -252,7 +256,7 @@ EOF read -r -p "Is this info correct [y/n]? " ANSWER if [ "$ANSWER" != "y" ]; then - echo "Exiting." + log "Exiting." exit 1 fi GPG_ARGS=("${GPG_ARGS[@]}" --local-user "${GPG_KEY}") @@ -279,7 +283,7 @@ function is_debug { function check_get_passwords { for env in "$@"; do if [ -z "${!env}" ]; then - echo "The environment variable $env is not set. Please enter the password or passphrase." + log "The environment variable $env is not set. Please enter the password or passphrase." echo # shellcheck disable=SC2229 stty -echo && printf "%s : " "$env" && read -r "$env" && printf '\n' && stty echo @@ -293,7 +297,7 @@ function check_needed_vars { local missing=0 for env in "$@"; do if [ -z "${!env}" ]; then - echo "$env must be set to run this script" + log "$env must be set to run this script" (( missing++ )) else # shellcheck disable=SC2163 @@ -322,7 +326,7 @@ function init_java { error "JAVA_HOME is not set." fi JAVA_VERSION=$("${JAVA_HOME}"/bin/javac -version 2>&1 | cut -d " " -f 2) - echo "java version: $JAVA_VERSION" + log "java version: $JAVA_VERSION" export JAVA_VERSION } @@ -330,7 +334,7 @@ function init_python { if ! [ -x "$(command -v python2)" ]; then error 'python2 needed by yetus. Install or add link? E.g: sudo ln -sf /usr/bin/python2.7 /usr/local/bin/python2' fi - echo "python version: $(python2 --version)" + log "python version: $(python2 --version)" } # Set MVN @@ -357,7 +361,7 @@ function init_yetus { fi # Work around yetus bug by asking test-patch for the version instead of rdm. YETUS_VERSION=$("${YETUS_HOME}/bin/test-patch" --version) - echo "Apache Yetus version ${YETUS_VERSION}" + log "Apache Yetus version ${YETUS_VERSION}" } function configure_maven { @@ -409,7 +413,7 @@ function git_clone_overwrite { if [[ -z "${GIT_REPO}" ]]; then asf_repo="gitbox.apache.org/repos/asf/${PROJECT}.git" - echo "[INFO] clone will be of the gitbox repo for ${PROJECT}." + log "Clone will be of the gitbox repo for ${PROJECT}." if [ -n "${ASF_USERNAME}" ] && [ -n "${ASF_PASSWORD}" ]; then # Ugly! encoded_username=$(python -c "import urllib; print urllib.quote('''$ASF_USERNAME''', '')") @@ -419,7 +423,7 @@ function git_clone_overwrite { GIT_REPO="https://${asf_repo}" fi else - echo "[INFO] clone will be of provided git repo." + log "Clone will be of provided git repo." fi # N.B. 
we use the shared flag because the clone is short lived and if a local repo repo was # given this will let us refer to objects there directly instead of hardlinks or copying. @@ -440,7 +444,7 @@ function start_step { if [ -z "${name}" ]; then name="${FUNCNAME[1]}" fi - echo "$(date -u +'%Y-%m-%dT%H:%M:%SZ') ${name} start" >&2 + log "${name} start" >&2 get_ctime } @@ -452,7 +456,7 @@ function stop_step { name="${FUNCNAME[1]}" fi stop_time="$(get_ctime)" - echo "$(date -u +'%Y-%m-%dT%H:%M:%SZ') ${name} stop ($((stop_time - start_time)) seconds)" + log "${name} stop ($((stop_time - start_time)) seconds)" } # Writes report into cwd! @@ -488,7 +492,7 @@ function get_jira_name { if [[ -z "$jira_name" ]]; then error "Sorry, can't determine the Jira name for project $project" fi - echo "$jira_name" + echo "$jira_name" # keep plain echo here: callers capture this value via command substitution, so a timestamp prefix would corrupt it } # Update the CHANGES.md @@ -625,7 +629,7 @@ make_binary_release() { done else cd .. || exit - echo "No ${f_bin_prefix}*-bin.tar.gz product; expected?" + log "No ${f_bin_prefix}*-bin.tar.gz product; expected?" fi stop_step "${timing_token}" @@ -648,7 +652,7 @@ function kick_gpg_agent { # Do maven command to set version into local pom function maven_set_version { #input: local this_version="$1" - echo "${MVN[@]}" versions:set -DnewVersion="$this_version" + log "${MVN[@]}" versions:set -DnewVersion="$this_version" "${MVN[@]}" versions:set -DnewVersion="$this_version" | grep -v "no value" # silence logs } @@ -679,8 +683,8 @@ function maven_deploy { #inputs: fi # Publish ${PROJECT} to Maven repo # shellcheck disable=SC2154 - echo "Publishing ${PROJECT} checkout at '$GIT_REF' ($git_hash)" - echo "Publish version is $RELEASE_VERSION" + log "Publishing ${PROJECT} checkout at '$GIT_REF' ($git_hash)" + log "Publish version is $RELEASE_VERSION" # Coerce the requested version maven_set_version "$RELEASE_VERSION" # Prepare for signing @@ -689,9 +693,8 @@ function maven_deploy { #inputs: if ! is_dry_run; then mvn_goals=("${mvn_goals[@]}" deploy) fi - echo "${MVN[@]}" -DskipTests -Dcheckstyle.skip=true "${PUBLISH_PROFILES[@]}" \ - "${mvn_goals[@]}" - echo "Logging to ${mvn_log_file}. This will take a while..." + log "${MVN[@]}" -DskipTests -Dcheckstyle.skip=true "${PUBLISH_PROFILES[@]}" "${mvn_goals[@]}" + log "Logging to ${mvn_log_file}. This will take a while..." rm -f "$mvn_log_file" # The tortuous redirect in the next command allows mvn's stdout and stderr to go to mvn_log_file, # while also sending stderr back to the caller. @@ -700,7 +703,7 @@ "${mvn_goals[@]}" 1>> "$mvn_log_file" 2> >( tee -a "$mvn_log_file" >&2 ); then error "Deploy build failed, for details see log at '$mvn_log_file'." fi - echo "BUILD SUCCESS." + log "BUILD SUCCESS."
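The "tortuous redirect" the comment above refers to is worth unpacking; a standalone sketch with a placeholder command name:

  # stdout appends to the log only; stderr is piped into a tee process that
  # appends to the same log AND re-emits it on the caller's stderr:
  some_build_cmd 1>> build.log 2> >( tee -a build.log >&2 )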
stop_step "${timing_token}" return 0 } From a9b8c10f8029bcc02a6d3a44ce4f9ab51187e4eb Mon Sep 17 00:00:00 2001 From: haxl Date: Thu, 10 Dec 2020 22:07:06 +0800 Subject: [PATCH 262/769] HBASE-25334 TestRSGroupsFallback.testFallback is flaky (#2728) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/master/ServerManager.java | 3 ++- .../org/apache/hadoop/hbase/master/TestDeadServer.java | 9 +++++++-- .../hadoop/hbase/rsgroup/TestRSGroupsFallback.java | 10 +++++++--- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 9e666c56a890..8977174edba7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -501,7 +501,8 @@ public DeadServer getDeadServers() { * @return true if any RS are being processed as dead, false if not */ public boolean areDeadServersInProgress() throws IOException { - return master.getProcedures().stream().anyMatch(p -> p instanceof ServerCrashProcedure); + return master.getProcedures().stream() + .anyMatch(p -> !p.isFinished() && p instanceof ServerCrashProcedure); } void letRegionServersShutdown() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java index 39163ab1de08..b0d6cb63ea76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java @@ -25,6 +25,7 @@ import java.util.Set; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; @@ -93,15 +94,19 @@ public static void tearDownAfterClass() throws Exception { } @Test - public void testCrashProcedureReplay() throws IOException { + public void testCrashProcedureReplay() throws Exception { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); final ProcedureExecutor pExecutor = master.getMasterProcedureExecutor(); ServerCrashProcedure proc = new ServerCrashProcedure( pExecutor.getEnvironment(), hostname123, false, false); + pExecutor.stop(); ProcedureTestingUtility.submitAndWait(pExecutor, proc); - assertTrue(master.getServerManager().areDeadServersInProgress()); + + ProcedureTestingUtility.restart(pExecutor); + ProcedureTestingUtility.waitProcedure(pExecutor, proc); + assertFalse(master.getServerManager().areDeadServersInProgress()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java index ea5e226b7f6b..478ffc654757 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java @@ -25,12 +25,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RSGroupTests; import org.apache.hadoop.hbase.util.Bytes; @@ -61,6 +63,7 @@ public class TestRSGroupsFallback extends TestRSGroupsBase { public static void setUp() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setBoolean(RSGroupBasedLoadBalancer.FALLBACK_GROUP_ENABLE_KEY, true); + conf.setInt(HConstants.HBASE_BALANCER_MAX_BALANCING, 0); setUpTestBeforeClass(); MASTER.balanceSwitch(true); } @@ -103,7 +106,7 @@ public void testFallback() throws Exception { // add a new server to default group, regions move to default group TEST_UTIL.getMiniHBaseCluster().startRegionServerAndWait(60000); - MASTER.balance(); + assertTrue(MASTER.balance()); assertRegionsInGroup(tableName, RSGroupInfo.DEFAULT_GROUP); // add a new server to test group, regions move back @@ -111,14 +114,15 @@ public void testFallback() throws Exception { TEST_UTIL.getMiniHBaseCluster().startRegionServerAndWait(60000); ADMIN.moveServersToRSGroup( Collections.singleton(t.getRegionServer().getServerName().getAddress()), groupName); - MASTER.balance(); + assertTrue(MASTER.balance()); assertRegionsInGroup(tableName, groupName); TEST_UTIL.deleteTable(tableName); } private void assertRegionsInGroup(TableName table, String group) throws IOException { - TEST_UTIL.waitUntilAllRegionsAssigned(table); + ProcedureTestingUtility.waitAllProcedures( + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); RSGroupInfo rsGroup = ADMIN.getRSGroup(group); MASTER.getAssignmentManager().getRegionStates().getRegionsOfTable(table).forEach(region -> { Address regionOnServer = MASTER.getAssignmentManager().getRegionStates() From 635c9115322b0be6eae2a630ef5cd59ba401fcb7 Mon Sep 17 00:00:00 2001 From: haxl Date: Thu, 10 Dec 2020 22:15:39 +0800 Subject: [PATCH 263/769] HBASE-25287 Forgetting to unbuffer streams results in many CLOSE_WAIT sockets when loading files (#2699) Signed-off-by: Andrew Purtell Signed-off-by: Duo Zhang --- .../hadoop/hbase/io/hfile/HFileInfo.java | 52 +++++++++++-------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 330ef6fed003..5d65ff3b3a39 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -342,8 +342,8 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr Path path = context.getFilePath(); checkFileVersion(path); this.hfileContext = createHFileContext(path, trailer, conf); - } catch (Throwable t) { context.getInputStreamWrapper().unbuffer(); + } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper()); throw new CorruptHFileException("Problem reading HFile Trailer from file " + context.getFilePath(), t); @@ -355,28 +355,36 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr */ public void initMetaAndIndex(HFile.Reader reader) throws IOException { 
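// Commentary on the restructuring below (added note, not part of the patch): the
// load-on-open reads are wrapped in try/catch so that on success the stream wrapper is
// unbuffer()ed, releasing the underlying file-system socket instead of leaving it in
// CLOSE_WAIT (the HBASE-25287 symptom), while on failure the wrapper is closed quietly
// and the error is rethrown as CorruptHFileException, mirroring initTrailerAndContext.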
ReaderContext context = reader.getContext(); - HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); - // Initialize an block iterator, and parse load-on-open blocks in the following. - blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), - context.getFileSize() - trailer.getTrailerSize()); - // Data index. We also read statistics about the block index written after - // the root level. - this.dataIndexReader = new HFileBlockIndex - .CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); - dataIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), - trailer.getDataIndexCount()); - reader.setDataBlockIndexReader(dataIndexReader); - // Meta index. - this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); - metaIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), + try { + HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); + // Initialize an block iterator, and parse load-on-open blocks in the following. + blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), + context.getFileSize() - trailer.getTrailerSize()); + // Data index. We also read statistics about the block index written after + // the root level. + this.dataIndexReader = + new HFileBlockIndex.CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); + dataIndexReader + .readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); + reader.setDataBlockIndexReader(dataIndexReader); + // Meta index. + this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); + metaIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getMetaIndexCount()); - reader.setMetaBlockIndexReader(metaIndexReader); - loadMetaInfo(blockIter, hfileContext); - reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this)); - // Load-On-Open info - HFileBlock b; - while ((b = blockIter.nextBlock()) != null) { - loadOnOpenBlocks.add(b); + reader.setMetaBlockIndexReader(metaIndexReader); + loadMetaInfo(blockIter, hfileContext); + reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this)); + // Load-On-Open info + HFileBlock b; + while ((b = blockIter.nextBlock()) != null) { + loadOnOpenBlocks.add(b); + } + // close the block reader + context.getInputStreamWrapper().unbuffer(); + } catch (Throwable t) { + IOUtils.closeQuietly(context.getInputStreamWrapper()); + throw new CorruptHFileException("Problem reading data index and meta index from file " + + context.getFilePath(), t); } } From c62c18dca7154294ac8c76f922a8fb4cd3e3f936 Mon Sep 17 00:00:00 2001 From: Adam <37170106+hsiangawang@users.noreply.github.com> Date: Thu, 10 Dec 2020 08:28:21 -0600 Subject: [PATCH 264/769] HBASE-25370 Fix flaky test TestClassFinder#testClassFinderDefaultsToOwnPackage (#2740) Signed-off-by: Duo Zhang --- hbase-common/pom.xml | 5 +++++ .../test/java/org/apache/hadoop/hbase/TestClassFinder.java | 7 ++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 48b3c0b7eded..8b9154156ba5 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -211,6 +211,11 @@ compile true + + org.hamcrest + hamcrest-library + test + org.mockito mockito-core diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java index b1c090322479..411bb65a95a5 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -295,7 +297,10 @@ public void testClassFinderDefaultsToOwnPackage() throws Exception { Set> pkgClasses = allClassesFinder.findClasses( ClassFinder.class.getPackage().getName(), false); Set> defaultClasses = allClassesFinder.findClasses(false); - assertArrayEquals(pkgClasses.toArray(), defaultClasses.toArray()); + Object[] pkgClassesArray = pkgClasses.toArray(); + Object[] defaultClassesArray = defaultClasses.toArray(); + assertEquals(pkgClassesArray.length, defaultClassesArray.length); + assertThat(pkgClassesArray, arrayContainingInAnyOrder(defaultClassesArray)); } private static class FileAndPath { From e04792959aa9f7440fcdc857233769892a43c60a Mon Sep 17 00:00:00 2001 From: Huaxiang Sun Date: Thu, 10 Dec 2020 10:12:53 -0800 Subject: [PATCH 265/769] Revert "HBASE-25293 Followup jira to address the client handling issue when chaning from meta replica to non-meta-replica at the server side." This reverts commit c1aa3b24e930e2c47ff4d7f6e286cb450458dffc. --- .../client/AsyncNonMetaRegionLocator.java | 2 +- .../CatalogReplicaLoadBalanceSelector.java | 2 - ...talogReplicaLoadBalanceSimpleSelector.java | 19 +-- ...talogReplicaLoadBalanceSimpleSelector.java | 132 ------------------ 4 files changed, 11 insertions(+), 144 deletions(-) delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 1c686aca8b76..2c2520f8bd12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -211,7 +211,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + int numOfReplicas = 1; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index 27be88a9def2..c3ce868757f1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -28,8 +28,6 @@ @InterfaceAudience.Private interface CatalogReplicaLoadBalanceSelector { - int UNINITIALIZED_NUM_OF_REPLICAS = -1; - /** * This method is called when input location is stale, i.e, when clients run into * org.apache.hadoop.hbase.NotServingRegionException. 
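For orientation before the next hunk: these selector classes decide which hbase:meta replica a client locate request reads from, and back off from replicas that recently returned stale locations. Below is a minimal, self-contained sketch of that pattern; apart from the UNINITIALIZED_NUM_OF_REPLICAS sentinel visible in the hunk above, every name in it is hypothetical, and it illustrates the idea rather than reproducing the HBase implementation.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ThreadLocalRandom;

    // Sketch only: random catalog-replica selection with a stale-replica backoff and a
    // lazily refreshed replica count guarded by a -1 sentinel.
    class CatalogReplicaPickerSketch {
      static final int UNINITIALIZED_NUM_OF_REPLICAS = -1; // sentinel, as in the hunk above
      private volatile int numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS;
      private final ConcurrentHashMap<Integer, Long> staleUntilMillis = new ConcurrentHashMap<>();

      int select() {
        int n = numOfReplicas;
        if (n <= 0) {
          return 0; // replica count unknown: read from the primary replica
        }
        int id = ThreadLocalRandom.current().nextInt(n);
        Long until = staleUntilMillis.get(id);
        return until != null && until > System.currentTimeMillis() ? 0 : id;
      }

      // A read through this replica returned a stale location (the client saw a
      // NotServingRegionException): avoid the replica for a few seconds.
      void onError(int replicaId) {
        staleUntilMillis.put(replicaId, System.currentTimeMillis() + 3_000L);
      }

      // Periodic refresh; a failed fetch reports the sentinel and is ignored.
      void refreshReplicaCount(int fetched) {
        if (fetched != UNINITIALIZED_NUM_OF_REPLICAS) {
          numOfReplicas = fetched;
        }
      }
    }

The point of the -1 sentinel shows in the refresh method: a cluster can legitimately run a single meta replica, so a count of 1 cannot double as a failure marker, which is why patch 271 later in this series restores the -1 handling that this revert removes.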
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index 01996b34e2ef..bc8264050149 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -108,6 +108,7 @@ public String toString() { private final TableName tableName; private final IntSupplier getNumOfReplicas; private volatile boolean isStopped = false; + private final static int UNINITIALIZED_NUM_OF_REPLICAS = -1; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, IntSupplier getNumOfReplicas) { @@ -116,7 +117,7 @@ public String toString() { this.getNumOfReplicas = getNumOfReplicas; // This numOfReplicas is going to be lazy initialized. - this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + this.numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS; // Start chores this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); @@ -145,7 +146,7 @@ public void onError(HRegionLocation loc) { */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; - if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + if (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } @@ -261,16 +262,16 @@ private void cleanupReplicaReplicaStaleCache() { private int refreshCatalogReplicaCount() { int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); LOG.debug("Refreshed replica count {}", newNumOfReplicas); - // If the returned number of replicas is -1, it is caused by failure to fetch the - // replica count. Do not update the numOfReplicas in this case. - if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { - LOG.error("Failed to fetch Table {}'s region replica count", tableName); - return this.numOfReplicas; + if (newNumOfReplicas == 1) { + LOG.warn("Table {}'s region replica count is 1, maybe a misconfiguration or failure to " + + "fetch the replica count", tableName); } - int cachedNumOfReplicas = this.numOfReplicas; + + // If the returned number of replicas is 1, it is mostly caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - (cachedNumOfReplicas != newNumOfReplicas)) { + ((cachedNumOfReplicas != newNumOfReplicas) && (newNumOfReplicas != 1))) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java deleted file mode 100644 index 6b14286f99ca..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ /dev/null @@ -1,132 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Category({ MediumTests.class, ClientTests.class }) -public class TestCatalogReplicaLoadBalanceSimpleSelector { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); - - private static final Logger LOG = LoggerFactory.getLogger( - TestCatalogReplicaLoadBalanceSimpleSelector.class); - - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - - private static final int NB_SERVERS = 4; - private static int numOfMetaReplica = NB_SERVERS - 1; - - private static AsyncConnectionImpl CONN; - - private static ConnectionRegistry registry; - private static Admin admin; - - @BeforeClass - public static void setUp() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - - TEST_UTIL.startMiniCluster(NB_SERVERS); - admin = TEST_UTIL.getAdmin(); - admin.balancerSwitch(false, true); - - // Enable hbase:meta replication. - HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); - TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( - TableName.META_TABLE_NAME).size() >= numOfMetaReplica); - - registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); - CONN = new AsyncConnectionImpl(conf, registry, - registry.getClusterId().get(), null, User.getCurrent()); - } - - @AfterClass - public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testMetaChangeFromReplicaNoReplica() throws IOException, InterruptedException { - String replicaSelectorClass = CONN.getConfiguration(). 
- get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, - CatalogReplicaLoadBalanceSimpleSelector.class.getName()); - - CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory - .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get - (CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); - - assertNotEquals( - metaSelector.select(TableName.valueOf("test"), EMPTY_START_ROW, RegionLocateType.CURRENT), - RegionReplicaUtil.DEFAULT_REPLICA_ID); - - // Change to No meta replica - HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 1); - TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( - TableName.META_TABLE_NAME).size() == 1); - - CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = - CatalogReplicaLoadBalanceSelectorFactory.createSelector( - replicaSelectorClass, META_TABLE_NAME, CONN, () -> { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get( - CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); - assertEquals( - metaSelectorWithNoReplica.select(TableName.valueOf("test"), EMPTY_START_ROW, - RegionLocateType.CURRENT), RegionReplicaUtil.DEFAULT_REPLICA_ID); - } -} From f098461a550cdb98a836e462470db3ada20761e4 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 11 Dec 2020 08:22:49 +0800 Subject: [PATCH 266/769] HBASE-25370 Addendum fix checkstyle issue and dependencies --- .../src/test/java/org/apache/hadoop/hbase/TestClassFinder.java | 1 - pom.xml | 2 -- 2 files changed, 3 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java index 411bb65a95a5..cf97e313cf0c 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java @@ -19,7 +19,6 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; diff --git a/pom.xml b/pom.xml index feeeb7f7f49b..f23d0db549b9 100755 --- a/pom.xml +++ b/pom.xml @@ -2286,13 +2286,11 @@ org.hamcrest hamcrest-core ${hamcrest.version} - test org.hamcrest hamcrest-library ${hamcrest.version} - test org.mockito From d50816fe448971b8e586792f0584aaf601e31780 Mon Sep 17 00:00:00 2001 From: Bo Cui Date: Sat, 12 Dec 2020 21:10:33 +0800 Subject: [PATCH 267/769] =?UTF-8?q?HBASE-23340=20hmaster=20/hbase/replicat?= =?UTF-8?q?ion/rs=20session=20expired=20(hbase=20repl=E2=80=A6=20(#2739)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/master/HMaster.java | 6 ++-- 
.../hbase/master/cleaner/LogCleaner.java | 5 ++-- .../master/ReplicationLogCleaner.java | 26 ++++++++++++----- .../hbase/master/cleaner/TestLogsCleaner.java | 28 ++++++++++++++----- .../region/TestMasterRegionWALCleaner.java | 2 +- 5 files changed, 47 insertions(+), 20 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 7d29ed668823..a61254f56101 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1336,17 +1336,17 @@ private void startServiceThreads() throws IOException { // Create cleaner thread pool cleanerPool = new DirScanPool(conf); + Map params = new HashMap<>(); + params.put(MASTER, this); // Start log cleaner thread int cleanerInterval = conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL); this.logCleaner = new LogCleaner(cleanerInterval, this, conf, - getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool); + getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool, params); getChoreService().scheduleChore(logCleaner); // start the hfile archive cleaner thread Path archiveDir = HFileArchiveUtil.getArchivePath(conf); - Map params = new HashMap<>(); - params.put(MASTER, this); this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(), archiveDir, cleanerPool, params); getChoreService().scheduleChore(hfileCleaner); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java index f65713ebf263..d8993b38ffef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; @@ -72,9 +73,9 @@ public class LogCleaner extends CleanerChore * @param pool the thread pool used to scan directories */ public LogCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs, - Path oldLogDir, DirScanPool pool) { + Path oldLogDir, DirScanPool pool, Map params) { super("LogsCleaner", period, stopper, conf, fs, oldLogDir, HBASE_MASTER_LOGCLEANER_PLUGINS, - pool); + pool, params); this.pendingDelete = new LinkedBlockingQueue<>(); int size = conf.getInt(OLD_WALS_CLEANER_THREAD_SIZE, DEFAULT_OLD_WALS_CLEANER_THREAD_SIZE); this.oldWALsCleaner = createOldWalsCleaner(size); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index 8f016bcb9124..a7821f1894a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -19,16 +19,19 @@ import java.io.IOException; import java.util.Collections; +import java.util.Map; import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import 
org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,7 +46,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ReplicationLogCleaner extends BaseLogCleanerDelegate { private static final Logger LOG = LoggerFactory.getLogger(ReplicationLogCleaner.class); - private ZKWatcher zkw; + private ZKWatcher zkw = null; + private boolean shareZK = false; private ReplicationQueueStorage queueStorage; private boolean stopped = false; private Set wals; @@ -92,12 +96,20 @@ public boolean apply(FileStatus file) { } @Override - public void setConf(Configuration config) { - // Make my own Configuration. Then I'll have my own connection to zk that - // I can close myself when comes time. - Configuration conf = new Configuration(config); + public void init(Map params) { + super.init(params); try { - setConf(conf, new ZKWatcher(conf, "replicationLogCleaner", null)); + if (MapUtils.isNotEmpty(params)) { + Object master = params.get(HMaster.MASTER); + if (master != null && master instanceof HMaster) { + zkw = ((HMaster) master).getZooKeeper(); + shareZK = true; + } + } + if (zkw == null) { + zkw = new ZKWatcher(getConf(), "replicationLogCleaner", null); + } + this.queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); } catch (IOException e) { LOG.error("Error while configuring " + this.getClass().getName(), e); } @@ -126,7 +138,7 @@ public void setConf(Configuration conf, ZKWatcher zk, public void stop(String why) { if (this.stopped) return; this.stopped = true; - if (this.zkw != null) { + if (!shareZK && this.zkw != null) { LOG.info("Stopping " + this.zkw); this.zkw.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 8ed31d009fb7..064f9a657623 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -205,7 +205,7 @@ public void testLogCleaning() throws Exception { // 10 procedure WALs assertEquals(10, fs.listStatus(OLD_PROCEDURE_WALS_DIR).length); - LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, OLD_WALS_DIR, POOL); + LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, OLD_WALS_DIR, POOL, null); cleaner.chore(); // In oldWALs we end up with the current WAL, a newer WAL, the 3 old WALs which @@ -226,7 +226,7 @@ public void testLogCleaning() throws Exception { } @Test - public void testZooKeeperAbortDuringGetListOfReplicators() throws Exception { + public void testZooKeeperRecoveryDuringGetListOfReplicators() throws Exception { ReplicationLogCleaner cleaner = new ReplicationLogCleaner(); List dummyFiles = Arrays.asList( @@ -239,7 +239,7 @@ public void testZooKeeperAbortDuringGetListOfReplicators() throws Exception { final 
AtomicBoolean getListOfReplicatorsFailed = new AtomicBoolean(false); try { - faultyZK.init(); + faultyZK.init(false); ReplicationQueueStorage queueStorage = spy(ReplicationStorageFactory .getReplicationQueueStorage(faultyZK, conf)); doAnswer(new Answer() { @@ -263,6 +263,18 @@ public Object answer(InvocationOnMock invocation) throws Throwable { assertTrue(getListOfReplicatorsFailed.get()); assertFalse(toDelete.iterator().hasNext()); assertFalse(cleaner.isStopped()); + + //zk recovery. + faultyZK.init(true); + cleaner.preClean(); + Iterable filesToDelete = cleaner.getDeletableFiles(dummyFiles); + Iterator iter = filesToDelete.iterator(); + assertTrue(iter.hasNext()); + assertEquals(new Path("log1"), iter.next().getPath()); + assertTrue(iter.hasNext()); + assertEquals(new Path("log2"), iter.next().getPath()); + assertFalse(iter.hasNext()); + } finally { faultyZK.close(); } @@ -306,7 +318,7 @@ public void testOnConfigurationChange() throws Exception { Server server = new DummyServer(); FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); - LogCleaner cleaner = new LogCleaner(3000, server, conf, fs, OLD_WALS_DIR, POOL); + LogCleaner cleaner = new LogCleaner(3000, server, conf, fs, OLD_WALS_DIR, POOL, null); int size = cleaner.getSizeOfCleaners(); assertEquals(LogCleaner.DEFAULT_OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC, cleaner.getCleanerThreadTimeoutMsec()); @@ -426,10 +438,12 @@ public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable a super(conf, identifier, abortable); } - public void init() throws Exception { + public void init(boolean autoRecovery) throws Exception { this.zk = spy(super.getRecoverableZooKeeper()); - doThrow(new KeeperException.ConnectionLossException()) - .when(zk).getChildren("/hbase/replication/rs", null); + if (!autoRecovery) { + doThrow(new KeeperException.ConnectionLossException()) + .when(zk).getChildren("/hbase/replication/rs", null); + } } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java index 08b5f9951906..39497b07e52f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java @@ -72,7 +72,7 @@ public void stop(String why) { public boolean isStopped() { return stopped; } - }, conf, fs, globalWALArchiveDir, cleanerPool); + }, conf, fs, globalWALArchiveDir, cleanerPool, null); choreService.scheduleChore(logCleaner); } From a4d42d1fcc2b43f6c8da97e47ff84548841e811c Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Tue, 15 Dec 2020 09:49:16 -0800 Subject: [PATCH 268/769] HBASE-25389 [Flakey Tests] branch-2 TestMetaShutdownHandler (#2773) Signed-off-by: Bharath Vissapragada --- .../apache/hadoop/hbase/master/TestMetaShutdownHandler.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java index 742734e0af23..d4c19335dd65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import 
org.apache.hadoop.hbase.master.assignment.RegionStates; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -102,6 +103,7 @@ public void testExpireMetaRegionServer() throws Exception { RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); assertEquals("Wrong state for meta!", RegionState.State.OPEN, metaState.getState()); assertNotEquals("Meta is on master!", metaServerName, master.getServerName()); + HRegionServer metaRegionServer = cluster.getRegionServer(metaServerName); // Delete the ephemeral node of the meta-carrying region server. // This is trigger the expire of this region server on the master. @@ -113,6 +115,7 @@ public void testExpireMetaRegionServer() throws Exception { // Wait for SSH to finish final ServerManager serverManager = master.getServerManager(); final ServerName priorMetaServerName = metaServerName; + TEST_UTIL.waitFor(60000, 100, () -> metaRegionServer.isStopped()); TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { From 9bdac6cd174bce570a2faf92849931b3e76f240d Mon Sep 17 00:00:00 2001 From: Pankaj Date: Tue, 15 Dec 2020 23:56:02 +0530 Subject: [PATCH 269/769] HBASE-25378 Legacy comparator in Hfile trailer will fail to load (#2756) Signed-off-by: stack Signed-off-by: Viraj Jasani --- .../apache/hadoop/hbase/io/hfile/FixedFileTrailer.java | 2 ++ .../hadoop/hbase/io/hfile/TestFixedFileTrailer.java | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 7ab4edb438a6..6a2dcf926a4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -612,6 +612,8 @@ private static Class getComparatorClass(String compara comparatorKlass = CellComparatorImpl.class; } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))) { comparatorKlass = MetaCellComparator.class; } else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index a5215bfe1d94..6382a0d74701 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -130,6 +130,11 @@ public void testCreateComparator() throws IOException { t.createComparator(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()).getClass()); assertEquals(MetaCellComparator.class, t.createComparator(KeyValue.META_COMPARATOR.getClass().getName()).getClass()); + assertEquals(MetaCellComparator.class, + 
t.createComparator("org.apache.hadoop.hbase.CellComparator$MetaCellComparator").getClass()); + assertEquals(MetaCellComparator.class, + t.createComparator("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator") + .getClass()); assertEquals(MetaCellComparator.class, t.createComparator( MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); assertEquals(MetaCellComparator.META_COMPARATOR.getClass(), t.createComparator( @@ -139,7 +144,8 @@ public void testCreateComparator() throws IOException { assertNull(t.createComparator(Bytes.BYTES_RAWCOMPARATOR.getClass().getName())); assertNull(t.createComparator("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")); } catch (IOException e) { - fail("Unexpected exception while testing FixedFileTrailer#createComparator()"); + fail("Unexpected exception while testing FixedFileTrailer#createComparator(), " + + e.getMessage()); } // Test an invalid comparatorClassName From 1bb9b7878712ed4baef4814bb6aa948fe11159e8 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Wed, 16 Dec 2020 12:25:59 +0800 Subject: [PATCH 270/769] HBASE-25365 The log in move_servers_rsgroup is incorrect (#2742) Signed-off-by: stack --- .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 35 +++++++++---------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 038e4dfc0e06..9850917e795d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -954,27 +954,28 @@ private void addRegion(final LinkedList regions, RegionInfo hri) { * located there. * @param movedServers the servers that are moved to new group * @param srcGrpServers all servers in the source group, excluding the movedServers - * @param targetGroup the target group + * @param targetGroupName the target group + * @param sourceGroupName the source group * @throws IOException if moving the server and tables fail */ private void moveServerRegionsFromGroup(Set
<Address> movedServers, Set<Address>
    srcGrpServers, - RSGroupInfo targetGroup) throws IOException { - moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroup, rs -> getRegions(rs), - info -> { + String targetGroupName, String sourceGroupName) throws IOException { + moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroupName, sourceGroupName, + rs -> getRegions(rs), info -> { try { String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP); - return groupName.equals(targetGroup.getName()); + return groupName.equals(targetGroupName); } catch (IOException e) { LOG.warn("Failed to test group for region {} and target group {}", info, - targetGroup.getName()); + targetGroupName); return false; } }); } private void moveRegionsBetweenGroups(Set regionsOwners, Set
<Address> newRegionsOwners, - RSGroupInfo targetGrp, Function<T, List<RegionInfo>> getRegionsInfo, + String targetGroupName, String sourceGroupName, Function<T, List<RegionInfo>> getRegionsInfo, Function<RegionInfo, Boolean> validation) throws IOException { // Get server names corresponding to given Addresses List<ServerName> movedServerNames = new ArrayList<>(regionsOwners.size()); @@ -1001,7 +1002,7 @@ private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, Set<Address>
    new for (RegionInfo region : getRegionsInfo.apply((T) owner.getAddress())) { if (!validation.apply(region)) { LOG.info("Moving region {}, which do not belong to RSGroup {}", - region.getShortNameToLog(), targetGrp.getName()); + region.getShortNameToLog(), targetGroupName); // Move region back to source RSGroup servers ServerName dest = masterServices.getLoadBalancer().randomAssignment(region, srcGrpServerNames); @@ -1015,17 +1016,16 @@ private void moveRegionsBetweenGroups(Set regionsOwners, Set
    new assignmentFutures.add(Pair.newPair(region, future)); } catch (IOException ioe) { failedRegions.add(region.getRegionNameAsString()); - LOG.debug("Move region {} from group failed, will retry, current retry time is {}", + LOG.debug("Move region {} failed, will retry, current retry time is {}", region.getShortNameToLog(), retry, ioe); toThrow = ioe; } } } } - waitForRegionMovement(assignmentFutures, failedRegions, targetGrp.getName(), retry); + waitForRegionMovement(assignmentFutures, failedRegions, sourceGroupName, retry); if (failedRegions.isEmpty()) { - LOG.info("All regions from server(s) {} moved to target group {}.", movedServerNames, - targetGrp.getName()); + LOG.info("All regions from {} are moved back to {}", movedServerNames, sourceGroupName); return; } else { try { @@ -1043,7 +1043,7 @@ private void moveRegionsBetweenGroups(Set regionsOwners, Set
    new if (!failedRegions.isEmpty()) { // print failed moved regions, for later process conveniently String msg = String - .format("move regions for group %s failed, failed regions: %s", targetGrp.getName(), + .format("move regions for group %s failed, failed regions: %s", sourceGroupName, failedRegions); LOG.error(msg); throw new DoNotRetryIOException( @@ -1056,9 +1056,9 @@ private void moveRegionsBetweenGroups(Set regionsOwners, Set
    new * completion even if some region movement fails. */ private void waitForRegionMovement(List>> regionMoveFutures, - Set failedRegions, String tgtGrpName, int retryCount) { + Set failedRegions, String sourceGroupName, int retryCount) { LOG.info("Moving {} region(s) to group {}, current retry={}", regionMoveFutures.size(), - tgtGrpName, retryCount); + sourceGroupName, retryCount); for (Pair> pair : regionMoveFutures) { try { pair.getSecond().get(); @@ -1073,7 +1073,7 @@ private void waitForRegionMovement(List>> region } catch (Exception e) { failedRegions.add(pair.getFirst().getRegionNameAsString()); LOG.error("Move region {} to group {} failed, will retry on next attempt", - pair.getFirst().getShortNameToLog(), tgtGrpName, e); + pair.getFirst().getShortNameToLog(), sourceGroupName, e); } } } @@ -1225,7 +1225,6 @@ public void moveServers(Set
<Address> servers, String targetGroupName) throws IOE if (StringUtils.isEmpty(targetGroupName)) { throw new ConstraintException("RSGroup cannot be null."); } - RSGroupInfo targetGroup = getRSGroupInfo(targetGroupName); // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. @@ -1270,7 +1269,7 @@ public void moveServers(Set
<Address> servers, String targetGroupName) throws IOE // MovedServers may be < passed in 'servers'. Set<Address>
    movedServers = moveServers(servers, srcGrp.getName(), targetGroupName); - moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroup); + moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroupName, srcGrp.getName()); LOG.info("Move servers done: {} => {}", srcGrp.getName(), targetGroupName); } } From 1c217da2ff8b9d6163e9dceb896ebdbc552925d5 Mon Sep 17 00:00:00 2001 From: huaxiangsun Date: Tue, 15 Dec 2020 21:45:39 -0800 Subject: [PATCH 271/769] HBASE-25293 Followup jira to address the client handling issue when chaning from meta replica to non-meta-replica at the server side. (#2768) Signed-off-by: stack --- .../client/AsyncNonMetaRegionLocator.java | 2 +- .../CatalogReplicaLoadBalanceSelector.java | 2 + ...talogReplicaLoadBalanceSimpleSelector.java | 19 ++- ...talogReplicaLoadBalanceSimpleSelector.java | 132 ++++++++++++++++++ 4 files changed, 144 insertions(+), 11 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 2c2520f8bd12..1c686aca8b76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -211,7 +211,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = 1; + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index c3ce868757f1..27be88a9def2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -28,6 +28,8 @@ @InterfaceAudience.Private interface CatalogReplicaLoadBalanceSelector { + int UNINITIALIZED_NUM_OF_REPLICAS = -1; + /** * This method is called when input location is stale, i.e, when clients run into * org.apache.hadoop.hbase.NotServingRegionException. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index bc8264050149..01996b34e2ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -108,7 +108,6 @@ public String toString() { private final TableName tableName; private final IntSupplier getNumOfReplicas; private volatile boolean isStopped = false; - private final static int UNINITIALIZED_NUM_OF_REPLICAS = -1; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, IntSupplier getNumOfReplicas) { @@ -117,7 +116,7 @@ public String toString() { this.getNumOfReplicas = getNumOfReplicas; // This numOfReplicas is going to be lazy initialized. 
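// Added note (not in the patch): -1 works as the "not yet fetched" sentinel where 1
// cannot, because a deployment may legitimately run a single meta replica; the reverted
// code had to guess between "replica count is really 1" and "fetch failed".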
- this.numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS; + this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; // Start chores this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); @@ -146,7 +145,7 @@ public void onError(HRegionLocation loc) { */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; - if (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) { + if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } @@ -262,16 +261,16 @@ private void cleanupReplicaReplicaStaleCache() { private int refreshCatalogReplicaCount() { int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); LOG.debug("Refreshed replica count {}", newNumOfReplicas); - if (newNumOfReplicas == 1) { - LOG.warn("Table {}'s region replica count is 1, maybe a misconfiguration or failure to " - + "fetch the replica count", tableName); + // If the returned number of replicas is -1, it is caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. + if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + LOG.error("Failed to fetch Table {}'s region replica count", tableName); + return this.numOfReplicas; } - int cachedNumOfReplicas = this.numOfReplicas; - // If the returned number of replicas is 1, it is mostly caused by failure to fetch the - // replica count. Do not update the numOfReplicas in this case. + int cachedNumOfReplicas = this.numOfReplicas; if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - ((cachedNumOfReplicas != newNumOfReplicas) && (newNumOfReplicas != 1))) { + (cachedNumOfReplicas != newNumOfReplicas)) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java new file mode 100644 index 000000000000..6b14286f99ca --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; +import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ MediumTests.class, ClientTests.class }) +public class TestCatalogReplicaLoadBalanceSimpleSelector { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); + + private static final Logger LOG = LoggerFactory.getLogger( + TestCatalogReplicaLoadBalanceSimpleSelector.class); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final int NB_SERVERS = 4; + private static int numOfMetaReplica = NB_SERVERS - 1; + + private static AsyncConnectionImpl CONN; + + private static ConnectionRegistry registry; + private static Admin admin; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + + TEST_UTIL.startMiniCluster(NB_SERVERS); + admin = TEST_UTIL.getAdmin(); + admin.balancerSwitch(false, true); + + // Enable hbase:meta replication. + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( + TableName.META_TABLE_NAME).size() >= numOfMetaReplica); + + registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + CONN = new AsyncConnectionImpl(conf, registry, + registry.getClusterId().get(), null, User.getCurrent()); + } + + @AfterClass + public static void tearDown() throws Exception { + IOUtils.closeQuietly(CONN); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMetaChangeFromReplicaNoReplica() throws IOException, InterruptedException { + String replicaSelectorClass = CONN.getConfiguration(). 
+ get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, + CatalogReplicaLoadBalanceSimpleSelector.class.getName()); + + CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory + .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get + (CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + + assertNotEquals( + metaSelector.select(TableName.valueOf("test"), EMPTY_START_ROW, RegionLocateType.CURRENT), + RegionReplicaUtil.DEFAULT_REPLICA_ID); + + // Change to No meta replica + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 1); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( + TableName.META_TABLE_NAME).size() == 1); + + CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = + CatalogReplicaLoadBalanceSelectorFactory.createSelector( + replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get( + CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + assertEquals( + metaSelectorWithNoReplica.select(TableName.valueOf("test"), EMPTY_START_ROW, + RegionLocateType.CURRENT), RegionReplicaUtil.DEFAULT_REPLICA_ID); + } +} From c3276801256aa16a62e5cdba7a37d4e18d59e880 Mon Sep 17 00:00:00 2001 From: huaxiangsun Date: Tue, 15 Dec 2020 21:52:54 -0800 Subject: [PATCH 272/769] HBASE-25368 Filter out more invalid encoded name in isEncodedRegionName(byte[] regionName) (#2753) Signed-off-by: stack --- .../hbase/client/RawAsyncHBaseAdmin.java | 87 ++++++++++--------- .../hadoop/hbase/client/RegionInfo.java | 18 +++- .../hadoop/hbase/client/TestAdmin1.java | 19 ++++ 3 files changed, 82 insertions(+), 42 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 512e7a96aa6d..7823963c4099 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2388,51 +2388,56 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR if (regionNameOrEncodedRegionName == null) { return failedFuture(new IllegalArgumentException("Passed region name can't be null")); } - try { - CompletableFuture> future; - if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { - String encodedName = Bytes.toString(regionNameOrEncodedRegionName); - if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { - // old format encodedName, should be meta region - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); - } else { - future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, - 
regionNameOrEncodedRegionName); - } + + CompletableFuture> future; + if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { + String encodedName = Bytes.toString(regionNameOrEncodedRegionName); + if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { + // old format encodedName, should be meta region + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); } else { - RegionInfo regionInfo = - CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName); - if (regionInfo.isMetaRegion()) { - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) - .findFirst()); - } else { - future = - ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); - } + future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, + regionNameOrEncodedRegionName); + } + } else { + // Not all regionNameOrEncodedRegionName here is going to be a valid region name, + // it needs to throw out IllegalArgumentException in case tableName is passed in. + RegionInfo regionInfo; + try { + regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName( + regionNameOrEncodedRegionName); + } catch (IOException ioe) { + throw new IllegalArgumentException(ioe.getMessage()); } - CompletableFuture returnedFuture = new CompletableFuture<>(); - addListener(future, (location, err) -> { - if (err != null) { - returnedFuture.completeExceptionally(err); - return; - } - if (!location.isPresent() || location.get().getRegion() == null) { - returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); - } else { - returnedFuture.complete(location.get()); - } - }); - return returnedFuture; - } catch (IOException e) { - return failedFuture(e); + if (regionInfo.isMetaRegion()) { + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) + .findFirst()); + } else { + future = + ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); + } } + + CompletableFuture returnedFuture = new CompletableFuture<>(); + addListener(future, (location, err) -> { + if (err != null) { + returnedFuture.completeExceptionally(err); + return; + } + if (!location.isPresent() || location.get().getRegion() == null) { + returnedFuture.completeExceptionally( + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); + } else { + returnedFuture.complete(location.get()); + } + }); + return returnedFuture; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index d7460e9d15ef..b6bdd0103de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -363,7 +363,23 @@ static byte[] getStartKey(final byte[] regionName) throws IOException { @InterfaceAudience.Private // For use by internals only. 
public static boolean isEncodedRegionName(byte[] regionName) { // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex. - return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH; + if (parseRegionNameOrReturnNull(regionName) == null) { + if (regionName.length > MD5_HEX_LENGTH) { + return false; + } else if (regionName.length == MD5_HEX_LENGTH) { + return true; + } else { + String encodedName = Bytes.toString(regionName); + try { + Integer.parseInt(encodedName); + // If this is a valid integer, it could be hbase:meta's encoded region name. + return true; + } catch(NumberFormatException er) { + return false; + } + } + } + return false; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index a0ed836f9c75..cfd61d295f89 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -99,6 +99,25 @@ public void testSplitFlushCompactUnknownTable() throws InterruptedException { assertTrue(exception instanceof TableNotFoundException); } + @Test + public void testCompactATableWithSuperLongTableName() throws Exception { + TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + try { + ADMIN.createTable(htd); + try { + ADMIN.majorCompactRegion(tableName.getName()); + ADMIN.majorCompactRegion(Bytes.toBytes("abcd")); + } catch (IllegalArgumentException iae) { + LOG.info("This is expected"); + } + } finally { + ADMIN.disableTable(tableName); + ADMIN.deleteTable(tableName); + } + } + @Test public void testCompactionTimestamps() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); From 74d68180e6fe19f3b0a4b24daff2508ba30e1ed0 Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Wed, 16 Dec 2020 18:46:21 +0530 Subject: [PATCH 273/769] HBASE-25246 Backup/Restore hbase cell tags Closes #2745 Signed-off-by: Anoop Sam John Signed-off-by: Viraj Jasani --- .../hbase/shaded/protobuf/ProtobufUtil.java | 70 ++++-- .../shaded/protobuf/TestProtobufUtil.java | 103 +++++++- .../apache/hadoop/hbase/mapreduce/Import.java | 5 +- .../hbase/mapreduce/ResultSerialization.java | 4 +- .../hbase/mapreduce/TestImportExport.java | 222 ++++++++++++++++++ .../hadoop/hbase/codec/MessageCodec.java | 2 +- src/main/asciidoc/_chapters/ops_mgt.adoc | 6 + 7 files changed, 391 insertions(+), 21 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index b9a08676f8ee..462ffb012d93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -1436,6 +1436,21 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final Result result) { + return toResult(result, false); + } + + /** + * Convert a client Result to a protocol buffer Result + * @param result the client Result to convert + * @param encodeTags whether to include tags in the converted protobuf result or not + * When @encodeTags is set to true, it
will return all the tags in the response. + * These tags may contain some sensitive data like acl permissions, etc. + * Only the tools like Export, Import which needs to take backup needs to set + * it to true so that cell tags are persisted in backup. + * Refer to HBASE-25246 for more context. + * @return the converted protocol buffer Result + */ + public static ClientProtos.Result toResult(final Result result, boolean encodeTags) { if (result.getExists() != null) { return toResult(result.getExists(), result.isStale()); } @@ -1447,7 +1462,7 @@ public static ClientProtos.Result toResult(final Result result) { ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); for (Cell c : cells) { - builder.addCell(toCell(c)); + builder.addCell(toCell(c, encodeTags)); } builder.setStale(result.isStale()); @@ -1494,6 +1509,22 @@ public static ClientProtos.Result toResultNoData(final Result result) { * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto) { + return toResult(proto, false); + } + + /** + * Convert a protocol buffer Result to a client Result + * + * @param proto the protocol buffer Result to convert + * @param decodeTags whether to decode tags into converted client Result + * When @decodeTags is set to true, it will decode all the tags from the + * response. These tags may contain some sensitive data like acl permissions, + * etc. Only the tools like Export, Import which needs to take backup needs to + * set it to true so that cell tags are persisted in backup. + * Refer to HBASE-25246 for more context. + * @return the converted client Result + */ + public static Result toResult(final ClientProtos.Result proto, boolean decodeTags) { if (proto.hasExists()) { if (proto.getStale()) { return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; @@ -1509,7 +1540,7 @@ public static Result toResult(final ClientProtos.Result proto) { List cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); for (CellProtos.Cell c : values) { - cells.add(toCell(builder, c)); + cells.add(toCell(builder, c, decodeTags)); } return Result.create(cells, null, proto.getStale(), proto.getPartial()); } @@ -1552,7 +1583,7 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner if (cells == null) cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); for (CellProtos.Cell c: values) { - cells.add(toCell(builder, c)); + cells.add(toCell(builder, c, false)); } } @@ -2000,7 +2031,7 @@ public static void toIOException(ServiceException se) throws IOException { throw new IOException(se); } - public static CellProtos.Cell toCell(final Cell kv) { + public static CellProtos.Cell toCell(final Cell kv, boolean encodeTags) { // Doing this is going to kill us if we do it for all data passed. // St.Ack 20121205 CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder(); @@ -2015,7 +2046,10 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(wrap(((ByteBufferExtendedCell) kv).getValueByteBuffer(), ((ByteBufferExtendedCell) kv).getValuePosition(), kv.getValueLength())); - // TODO : Once tags become first class then we may have to set tags to kvbuilder. 
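+      // Tags may carry sensitive metadata such as ACL permissions, so they are
+      // serialized only when a backup tool such as Export/Import explicitly opts
+      // in via encodeTags (see HBASE-25246).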
+ if (encodeTags) { + kvbuilder.setTags(wrap(((ByteBufferExtendedCell) kv).getTagsByteBuffer(), + ((ByteBufferExtendedCell) kv).getTagsPosition(), kv.getTagsLength())); + } } else { kvbuilder.setRow( UnsafeByteOperations.unsafeWrap(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); @@ -2027,6 +2061,10 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(UnsafeByteOperations.unsafeWrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + if (encodeTags) { + kvbuilder.setTags(UnsafeByteOperations.unsafeWrap(kv.getTagsArray(), kv.getTagsOffset(), + kv.getTagsLength())); + } } return kvbuilder.build(); } @@ -2038,15 +2076,19 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { return UnsafeByteOperations.unsafeWrap(dup); } - public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - return cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()) - .build(); + public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, + boolean decodeTags) { + ExtendedCellBuilder builder = cellBuilder.clear() + .setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()) + .setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()) + .setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()); + if (decodeTags && cell.hasTags()) { + builder.setTags(cell.getTags().toByteArray()); + } + return builder.build(); } public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 7d6eda817cfa..c47150b04858 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -18,17 +18,24 @@ package org.apache.hadoop.hbase.shaded.protobuf; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -63,7 +70,8 @@ public class TestProtobufUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestProtobufUtil.class); - + private static final String TAG_STR = "tag-1"; + private static final byte TAG_TYPE = (byte)10; public 
TestProtobufUtil() { } @@ -271,9 +279,10 @@ public void testToCell() { ByteBuffer dbb = ByteBuffer.allocateDirect(arr.length); dbb.put(arr); ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength()); - CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV); + CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV, false); Cell newOffheapKV = - ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell); + ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, + false); assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0); } @@ -479,4 +488,92 @@ public void testRegionLockInfo() { + "\"sharedLockCount\":0" + "}]", lockJson); } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encode/decode tags is set to true. + */ + @Test + public void testCellConversionWithTags() { + + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, true); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(1, decodedTags.size()); + Tag decodedTag = decodedTags.get(0); + assertEquals(TAG_TYPE, decodedTag.getType()); + assertEquals(TAG_STR, Tag.getValueAsString(decodedTag)); + } + + private Cell getCellWithTags() { + Tag tag = new ArrayBackedTag(TAG_TYPE, TAG_STR); + ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + cellBuilder.setRow(Bytes.toBytes("row1")); + cellBuilder.setFamily(Bytes.toBytes("f1")); + cellBuilder.setQualifier(Bytes.toBytes("q1")); + cellBuilder.setValue(Bytes.toBytes("value1")); + cellBuilder.setType(Cell.Type.Delete); + cellBuilder.setTags(Collections.singletonList(tag)); + return cellBuilder.build(); + } + + private Cell getCellFromProtoResult(CellProtos.Cell protoCell, boolean decodeTags) { + ExtendedCellBuilder decodedBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + return ProtobufUtil.toCell(decodedBuilder, protoCell, decodeTags); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encode/decode tags is set to false. + */ + @Test + public void testCellConversionWithoutTags() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, false); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encoding of tags is set to false + * and decoding of tags is set to true. 
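+   * Since the tags were never serialized, there is nothing to decode back, so the
+   * resulting cell is expected to carry no tags.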
+ */ + @Test + public void testTagEncodeFalseDecodeTrue() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, true); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encoding of tags is set to true + * and decoding of tags is set to false. + */ + @Test + public void testTagEncodeTrueDecodeFalse() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, false); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 239a12bdc688..30071fdfd809 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -511,6 +512,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { + List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -524,7 +526,8 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength()); // value length + kv.getValueLength(), // value length + tags.size() == 0 ? 
null: tags); } } return kv; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java index dac1d425d806..9fdaa7b78f75 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java @@ -128,7 +128,7 @@ public void close() throws IOException { @Override public Result deserialize(Result mutation) throws IOException { ClientProtos.Result proto = ClientProtos.Result.parseDelimitedFrom(in); - return ProtobufUtil.toResult(proto); + return ProtobufUtil.toResult(proto, true); } @Override @@ -152,7 +152,7 @@ public void open(OutputStream out) throws IOException { @Override public void serialize(Result result) throws IOException { - ProtobufUtil.toResult(result).writeDelimitedTo(out); + ProtobufUtil.toResult(result, true).writeDelimitedTo(out); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 12060a742a2b..7b38c59c9387 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.apache.hadoop.hbase.HConstants.RPC_CODEC_CONF_KEY; +import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -34,10 +36,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -46,10 +51,14 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -58,11 +67,18 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import 
org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.Import.CellImporter; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -117,6 +133,9 @@ public class TestImportExport { private static final long now = System.currentTimeMillis(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final String TEST_ATTR = "source_op"; + public static final String TEST_TAG = "test_tag"; @BeforeClass public static void beforeClass() throws Throwable { @@ -801,4 +820,207 @@ public boolean isWALVisited() { return isVisited; } } + + /** + * Add cell tags to delete mutations, run export and import tool and + * verify that tags are present in import table also. + * @throws Throwable throws Throwable. + */ + @Test + public void testTagsAddition() throws Throwable { + final TableName exportTable = TableName.valueOf(name.getMethodName()); + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(desc); + + Table exportT = UTIL.getConnection().getTable(exportTable); + + //Add first version of QUAL + Put p = new Put(ROW1); + p.addColumn(FAMILYA, QUAL, now, QUAL); + exportT.put(p); + + //Add Delete family marker + Delete d = new Delete(ROW1, now+3); + // Add test attribute to delete mutation. + d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); + exportT.delete(d); + + // Run export tool with KeyValueCodecWithTags as Codec. This will ensure that export tool + // will use KeyValueCodecWithTags. + String[] args = new String[] { + "-D" + ExportUtils.RAW_SCAN + "=true", + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + exportTable.getNameAsString(), + FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); + // Assert tag exists in exportTable + checkWhetherTagExists(exportTable, true); + + // Create an import table with MetadataController. + final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); + TableDescriptor importTableDesc = TableDescriptorBuilder + .newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(importTableDesc); + + // Run import tool. + args = new String[] { + // This will make sure that codec will encode and decode tags in rpc call. 
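+      // The import must use the same tag-aware codec as the export; with the
+      // default codec the rpc layer intentionally strips tags from responses.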
+ "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + importTable.getNameAsString(), + FQ_OUTPUT_DIR + }; + assertTrue(runImport(args)); + // Make sure that tags exists in imported table. + checkWhetherTagExists(importTable, true); + } + + private void checkWhetherTagExists(TableName table, boolean tagExists) throws IOException { + List values = new ArrayList<>(); + for (HRegion region : UTIL.getHBaseCluster().getRegions(table)) { + Scan scan = new Scan(); + // Make sure to set rawScan to true so that we will get Delete Markers. + scan.setRaw(true); + scan.readAllVersions(); + scan.withStartRow(ROW1); + // Need to use RegionScanner instead of table#getScanner since the latter will + // not return tags since it will go through rpc layer and remove tags intentionally. + RegionScanner scanner = region.getScanner(scan); + scanner.next(values); + if (!values.isEmpty()) { + break; + } + } + boolean deleteFound = false; + for (Cell cell: values) { + if (PrivateCellUtil.isDelete(cell.getType().getCode())) { + deleteFound = true; + List tags = PrivateCellUtil.getTags(cell); + // If tagExists flag is true then validate whether tag contents are as expected. + if (tagExists) { + Assert.assertEquals(1, tags.size()); + for (Tag tag : tags) { + Assert.assertEquals(TEST_TAG, Tag.getValueAsString(tag)); + } + } else { + // If tagExists flag is disabled then check for 0 size tags. + assertEquals(0, tags.size()); + } + } + } + Assert.assertTrue(deleteFound); + } + + /* + This co-proc will add a cell tag to delete mutation. + */ + public static class MetadataController implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) + throws IOException { + if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { + return; + } + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (!(m instanceof Delete)) { + continue; + } + byte[] sourceOpAttr = m.getAttribute(TEST_ATTR); + if (sourceOpAttr == null) { + continue; + } + Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); + List updatedCells = new ArrayList<>(); + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + Cell cell = cellScanner.current(); + List tags = PrivateCellUtil.getTags(cell); + tags.add(sourceOpTag); + Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + updatedCells.add(updatedCell); + } + m.getFamilyCellMap().clear(); + // Clear and add new Cells to the Mutation. + for (Cell cell : updatedCells) { + Delete d = (Delete) m; + d.add(cell); + } + } + } + } + + /** + * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string + * This means it will use no Codec. Make sure that we don't return Tags in response. 
+ * @throws Exception Exception + */ + @Test + public void testTagsWithEmptyCodec() throws Exception { + TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor tableDesc = TableDescriptorBuilder + .newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(tableDesc); + Configuration conf = new Configuration(UTIL.getConfiguration()); + conf.set(RPC_CODEC_CONF_KEY, ""); + conf.set(DEFAULT_CODEC_CLASS, ""); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { + //Add first version of QUAL + Put p = new Put(ROW1); + p.addColumn(FAMILYA, QUAL, now, QUAL); + table.put(p); + + //Add Delete family marker + Delete d = new Delete(ROW1, now+3); + // Add test attribute to delete mutation. + d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); + table.delete(d); + + // Since RPC_CODEC_CONF_KEY and DEFAULT_CODEC_CLASS is set to empty, it will use + // empty Codec and it shouldn't encode/decode tags. + Scan scan = new Scan().withStartRow(ROW1).setRaw(true); + ResultScanner scanner = table.getScanner(scan); + int count = 0; + Result result; + while ((result = scanner.next()) != null) { + List cells = result.listCells(); + assertEquals(2, cells.size()); + Cell cell = cells.get(0); + assertTrue(CellUtil.isDelete(cell)); + List tags = PrivateCellUtil.getTags(cell); + assertEquals(0, tags.size()); + count++; + } + assertEquals(1, count); + } finally { + UTIL.deleteTable(tableName); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java index 4b266e2bda7a..ddbbb5fc8bdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java @@ -74,7 +74,7 @@ static class MessageDecoder extends BaseDecoder { @Override protected Cell parseCell() throws IOException { - return ProtobufUtil.toCell(cellBuilder, CellProtos.Cell.parseDelimitedFrom(this.in)); + return ProtobufUtil.toCell(cellBuilder, CellProtos.Cell.parseDelimitedFrom(this.in), false); } } diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index e491cbc95b54..2c5a3d413c47 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -735,6 +735,9 @@ specifying column families and applying filters during the export. By default, the `Export` tool only exports the newest version of a given cell, regardless of the number of versions stored. To export more than one version, replace *__* with the desired number of versions. +For mapreduce based Export, if you want to export cell tags then set the following config property +`hbase.client.rpc.codec` to `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags` + Note: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration. 
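+
+For illustration only, an export invocation that preserves cell tags might look like
+the following (a sketch; the table and output-directory placeholders are illustrative,
+not part of the original patch):
+
+----
+$ bin/hbase org.apache.hadoop.hbase.mapreduce.Export \
+  -Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags \
+  <tablename> <outputdir>
+----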
[[import]] @@ -755,6 +758,9 @@ To import 0.94 exported files in a 0.96 cluster or onwards, you need to set syst $ bin/hbase -Dhbase.import.version=0.94 org.apache.hadoop.hbase.mapreduce.Import ---- +If you want to import cell tags then set the following config property +`hbase.client.rpc.codec` to `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags` + [[importtsv]] === ImportTsv From 888a4fb86a3a72636a699f2c8ef968adac081545 Mon Sep 17 00:00:00 2001 From: stack Date: Tue, 15 Dec 2020 21:02:45 -0800 Subject: [PATCH 274/769] HBASE-25400 [Flakey Tests] branch-2 TestRegionMoveAndAbandon --- .../hbase/master/assignment/TestRegionMoveAndAbandon.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java index 45e9d01972d6..cc8335aa25af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java @@ -103,8 +103,9 @@ public void test() throws Exception { LOG.info("Killing RS {}", rs1.getServerName()); // Stop RS1 cluster.killRegionServer(rs1.getServerName()); + UTIL.waitFor(30_000, () -> rs1.isStopped() && !rs1.isAlive()); // Region should get moved to RS2 - UTIL.waitTableAvailable(tableName, 30_000); + UTIL.waitTableAvailable(tableName, 60_000); // Restart the master LOG.info("Killing master {}", cluster.getMaster().getServerName()); cluster.killMaster(cluster.getMaster().getServerName()); From 15d229eb35eeb49737dfa6ec2e328161ddd6a2db Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 16 Dec 2020 11:31:54 -0800 Subject: [PATCH 275/769] Add entry for 2.4.0 to downloads.xml --- src/site/xdoc/downloads.xml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index bbd60e5e2e4a..fe6f3d8d198e 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -43,6 +43,29 @@ under the License. Download Notices + + + 2.4.0 + + + 2020/12/15 + + + 2.4.0 vs 2.3.0 + + + Changes + + + Release Notes + + + src (sha512 asc)
    + bin (sha512 asc)
+ client-bin (sha512 asc) + + + 2.3.3 From f600856a3b11ea863776400e8a8cda04d1a33a15 Mon Sep 17 00:00:00 2001 From: Sandeep Pal Date: Fri, 18 Dec 2020 13:23:00 +0530 Subject: [PATCH 276/769] HBASE-25383: Ability to update and remove peer base config Closes #2778 Signed-off-by: Bharath Vissapragada Signed-off-by: Geoffrey Jacoby Signed-off-by: Viraj Jasani --- .../ReplicationPeerConfigUtil.java | 34 +++--- .../replication/ReplicationPeerConfig.java | 6 + .../ReplicationPeerConfigBuilder.java | 9 ++ .../TestZKReplicationPeerStorage.java | 67 +++++++++-- .../replication/ReplicationPeerManager.java | 4 +- .../replication/TestMasterReplication.java | 107 +++++++++++++----- 6 files changed, 174 insertions(+), 53 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index c5dcd762e96f..05343eae4ccd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -27,7 +27,6 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompoundConfiguration; @@ -40,12 +39,12 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - +import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -246,7 +245,7 @@ public static Map<TableName, List<String>> convert2Map(ReplicationProtos.TableCF /** * @param bytes Content of a peer znode. * @return ClusterKey parsed from the passed bytes. - * @throws DeserializationException + * @throws DeserializationException deserialization exception */ public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) throws DeserializationException { @@ -390,7 +389,7 @@ public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig pe } /** - * @param peerConfig + * @param peerConfig peer config of replication peer * @return Serialized protobuf of peerConfig with pb magic prefix prepended suitable * for use as content of a this.peersZNode; i.e. the content of PEER_ID znode under * /hbase/replication/peers/PEER_ID @@ -454,37 +453,42 @@ public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig( } /** - * Helper method to add base peer configs from Configuration to ReplicationPeerConfig - * if not present in latter. + * Helper method to add/remove base peer configs from Configuration to ReplicationPeerConfig * * This merges the user supplied peer configuration * {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs * provided as property hbase.replication.peer.base.configs in hbase configuration.
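+   * For example, a base config value of "k1=v1;k2=" would merge k1=v1 into the peer
+   * config and remove any existing k2 entry (hypothetical keys, for illustration).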
- * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1". Original value - * of conf is retained if already present in ReplicationPeerConfig. + * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". + * If value is empty, it will remove the existing key-value from peer config. * * @param conf Configuration * @return ReplicationPeerConfig containing updated configs. */ - public static ReplicationPeerConfig addBasePeerConfigsIfNotPresent(Configuration conf, + public static ReplicationPeerConfig updateReplicationBasePeerConfigs(Configuration conf, ReplicationPeerConfig receivedPeerConfig) { - String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); ReplicationPeerConfigBuilder copiedPeerConfigBuilder = ReplicationPeerConfig. newBuilder(receivedPeerConfig); - Map receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); + Map receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); + String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); if (basePeerConfigs.length() != 0) { Map basePeerConfigMap = Splitter.on(';').trimResults().omitEmptyStrings() .withKeyValueSeparator("=").split(basePeerConfigs); - for (Map.Entry entry : basePeerConfigMap.entrySet()) { + for (Map.Entry entry : basePeerConfigMap.entrySet()) { String configName = entry.getKey(); String configValue = entry.getValue(); - // Only override if base config does not exist in existing peer configs - if (!receivedPeerConfigMap.containsKey(configName)) { + // If the config is provided with empty value, for eg. k1="", + // we remove it from peer config. Providing config with empty value + // is required so that it doesn't remove any other config unknowingly. + if (Strings.isNullOrEmpty(configValue)) { + copiedPeerConfigBuilder.removeConfiguration(configName); + } else if (!receivedPeerConfigMap.getOrDefault(configName, "").equals(configValue)) { + // update the configuration if exact config and value doesn't exists copiedPeerConfigBuilder.putConfiguration(configName, configValue); } } } + return copiedPeerConfigBuilder.build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index aba703ccdee8..bb3ff042ca06 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -294,6 +294,12 @@ public ReplicationPeerConfigBuilder putConfiguration(String key, String value) { return this; } + @Override + public ReplicationPeerConfigBuilder removeConfiguration(String key) { + this.configuration.remove(key); + return this; + } + @Override public ReplicationPeerConfigBuilder putPeerData(byte[] key, byte[] value) { this.peerData.put(key, value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java index 58ff220e5631..c6a97fad9e81 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java @@ -52,6 +52,15 @@ public interface ReplicationPeerConfigBuilder { @InterfaceAudience.Private ReplicationPeerConfigBuilder putConfiguration(String key, String value); + /** + * Removes a "raw" 
configuration property for this replication peer. For experts only. + * @param key Configuration property key to be removed + * @return {@code this} + */ + @InterfaceAudience.Private + ReplicationPeerConfigBuilder removeConfiguration(String key); + + + /** * Adds all of the provided "raw" configuration entries to {@code this}. * @param configuration A collection of raw configuration entries diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java index e7ee1e7c4835..18b0c121e67e 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java @@ -26,7 +26,6 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - import java.io.IOException; import java.util.HashMap; import java.util.Iterator; @@ -35,7 +34,6 @@ import java.util.Random; import java.util.Set; import java.util.stream.Stream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseZKTestingUtility; @@ -45,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.zookeeper.KeeperException; +import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -73,6 +72,11 @@ public static void tearDown() throws IOException { UTIL.shutdownMiniZKCluster(); } + @After + public void cleanCustomConfigurations() { + UTIL.getConfiguration().unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + } + private Set<String> randNamespaces(Random rand) { return Stream.generate(() -> Long.toHexString(rand.nextLong())).limit(rand.nextInt(5)) .collect(toSet()); @@ -220,8 +224,7 @@ public void testNoSyncReplicationState() STORAGE.getNewSyncReplicationStateNode(peerId))); } - @Test - public void testBaseReplicationPeerConfig() { + @Test public void testBaseReplicationPeerConfig() throws ReplicationException{ String customPeerConfigKey = "hbase.xxx.custom_config"; String customPeerConfigValue = "test"; String customPeerConfigUpdatedValue = "testUpdated"; @@ -241,7 +244,7 @@ public void testBaseReplicationPeerConfig() { concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - addBasePeerConfigsIfNotPresent(conf,existingReplicationPeerConfig); + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). get(customPeerConfigKey)); assertEquals(customPeerConfigSecondValue, updatedReplicationPeerConfig.getConfiguration(). get(customPeerConfigSecondKey)); - // validates base configs does not override value if config already present + // validates base configs get updated values even if config already present + conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";").
concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil. - addBasePeerConfigsIfNotPresent(conf,updatedReplicationPeerConfig); + updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); - assertEquals(customPeerConfigValue, replicationPeerConfigAfterValueUpdate. + assertEquals(customPeerConfigUpdatedValue, replicationPeerConfigAfterValueUpdate. getConfiguration().get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondValue, replicationPeerConfigAfterValueUpdate. + assertEquals(customPeerConfigSecondUpdatedValue, replicationPeerConfigAfterValueUpdate. getConfiguration().get(customPeerConfigSecondKey)); } + + @Test public void testBaseReplicationRemovePeerConfig() throws ReplicationException { + String customPeerConfigKey = "hbase.xxx.custom_config"; + String customPeerConfigValue = "test"; + ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); + + // custom config not present + assertEquals(existingReplicationPeerConfig.getConfiguration().get(customPeerConfigKey), null); + + Configuration conf = UTIL.getConfiguration(); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat(customPeerConfigValue)); + + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + + // validates base configs are present in replicationPeerConfig + assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). + get(customPeerConfigKey)); + + conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat("")); + + ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil. + updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + + assertNull(replicationPeerConfigRemoved.getConfiguration().get(customPeerConfigKey)); + } + + @Test public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() + throws ReplicationException { + String customPeerConfigKey = "hbase.xxx.custom_config"; + ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); + + // custom config not present + assertEquals(existingReplicationPeerConfig.getConfiguration().get(customPeerConfigKey), null); + Configuration conf = UTIL.getConfiguration(); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat("")); + + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. 
+ updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + assertNull(updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 2c930e103fc8..add51210a38f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -233,7 +233,7 @@ public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean ena // this should be a retry, just return return; } - peerConfig = ReplicationPeerConfigUtil.addBasePeerConfigsIfNotPresent(conf, peerConfig); + peerConfig = ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, peerConfig); ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build(); SyncReplicationState syncReplicationState = copiedPeerConfig.isSyncReplication() ? SyncReplicationState.DOWNGRADE_ACTIVE @@ -547,7 +547,7 @@ public static ReplicationPeerManager create(ZKWatcher zk, Configuration conf, St for (String peerId : peerStorage.listPeerIds()) { ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId); - peerConfig = ReplicationPeerConfigUtil.addBasePeerConfigsIfNotPresent(conf, peerConfig); + peerConfig = ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, peerConfig); peerStorage.updatePeerConfig(peerId, peerConfig); boolean enabled = peerStorage.isPeerEnabled(peerId); SyncReplicationState state = peerStorage.getPeerSyncReplicationState(peerId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index b7e5edd649b2..b2e0e6d4860e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; - import java.io.Closeable; import java.io.IOException; import java.util.Arrays; @@ -29,7 +28,6 @@ import java.util.Optional; import java.util.Random; import java.util.concurrent.CountDownLatch; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -200,8 +198,8 @@ public void testHFileCyclicReplication() throws Exception { // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1'. byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; int numOfRows = 100; int[] expectedCounts = new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; @@ -212,10 +210,10 @@ public void testHFileCyclicReplication() throws Exception { // Load 200 rows for each hfile range in cluster '1' and validate whether its been replicated // to cluster '0'. 
hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, - new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; numOfRows = 200; int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], - hfileRanges.length * numOfRows + expectedCounts[1] }; + hfileRanges.length * numOfRows + expectedCounts[1] }; loadAndValidateHFileReplication("testHFileCyclicReplication_10", 1, new int[] { 0 }, row, famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); @@ -314,12 +312,12 @@ public void testHFileMultiSlaveReplication() throws Exception { // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1'. byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, - new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, + new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; int numOfRows = 100; int[] expectedCounts = - new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; + new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; loadAndValidateHFileReplication("testHFileCyclicReplication_0", 0, new int[] { 1 }, row, famName, htables, hfileRanges, numOfRows, expectedCounts, true); @@ -335,11 +333,11 @@ public void testHFileMultiSlaveReplication() throws Exception { // Load 200 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1' and '2'. Previous data should be replicated to cluster '2'. hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("ssss"), Bytes.toBytes("uuuu") }, - new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; + new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; numOfRows = 200; int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], - hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; + hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; loadAndValidateHFileReplication("testHFileCyclicReplication_1", 0, new int[] { 1, 2 }, row, famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); @@ -370,8 +368,8 @@ public void testHFileReplicationForConfiguredTableCfs() throws Exception { // Load 100 rows for each hfile range in cluster '0' for table CF 'f' byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; int numOfRows = 100; int[] expectedCounts = new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; @@ -381,11 +379,11 @@ public void testHFileReplicationForConfiguredTableCfs() throws Exception { // Load 100 rows for each hfile range in cluster '0' for table CF 'f1' hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, - new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; numOfRows = 100; int[] newExpectedCounts = - new int[] { hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; + new int[] { 
hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; loadAndValidateHFileReplication("load_f1", 0, new int[] { 1 }, row, famName1, htables, hfileRanges, numOfRows, newExpectedCounts, false); @@ -449,7 +447,7 @@ public void testCyclicReplication3() throws Exception { * */ @Test - public void testBasePeerConfigsForPeerMutations() + public void testBasePeerConfigsForReplicationPeer() throws Exception { LOG.info("testBasePeerConfigsForPeerMutations"); String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; @@ -502,18 +500,15 @@ public void testBasePeerConfigsForPeerMutations() utilities[0].restartHBaseCluster(1); admin = utilities[0].getAdmin(); - // Both retains the value of base configuration 1 value as before restart. - // Peer 1 (Update value), Peer 2 (Base Value) - Assert.assertEquals(firstCustomPeerConfigUpdatedValue, admin.getReplicationPeerConfig("1"). + // Configurations should be updated after restart again + Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). getConfiguration().get(firstCustomPeerConfigKey)); Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). getConfiguration().get(firstCustomPeerConfigKey)); - // Peer 1 gets new base config as part of restart. Assert.assertEquals(secondCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). getConfiguration().get(secondCustomPeerConfigKey)); - // Peer 2 retains the updated value as before restart. - Assert.assertEquals(secondCustomPeerConfigUpdatedValue, admin.getReplicationPeerConfig("2"). + Assert.assertEquals(secondCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). getConfiguration().get(secondCustomPeerConfigKey)); } finally { shutDownMiniClusters(); @@ -521,6 +516,64 @@ public void testBasePeerConfigsForPeerMutations() } } + @Test + public void testBasePeerConfigsRemovalForReplicationPeer() + throws Exception { + LOG.info("testBasePeerConfigsForPeerMutations"); + String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; + String firstCustomPeerConfigValue = "test"; + + try { + baseConfiguration.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + firstCustomPeerConfigKey.concat("=").concat(firstCustomPeerConfigValue)); + startMiniClusters(2); + addPeer("1", 0, 1); + Admin admin = utilities[0].getAdmin(); + + // Validates base configs 1 is present for both peer. + Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). + getConfiguration().get(firstCustomPeerConfigKey)); + + utilities[0].getConfiguration().unset(ReplicationPeerConfigUtil. + HBASE_REPLICATION_PEER_BASE_CONFIG); + utilities[0].getConfiguration().set(ReplicationPeerConfigUtil. 
+ HBASE_REPLICATION_PEER_BASE_CONFIG, firstCustomPeerConfigKey.concat("=").concat("")); + + + utilities[0].shutdownMiniHBaseCluster(); + utilities[0].restartHBaseCluster(1); + admin = utilities[0].getAdmin(); + + // Configurations should be removed after restart again + Assert.assertNull(admin.getReplicationPeerConfig("1") + .getConfiguration().get(firstCustomPeerConfigKey)); + } finally { + shutDownMiniClusters(); + baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + } + } + + @Test + public void testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer() + throws Exception { + LOG.info("testBasePeerConfigsForPeerMutations"); + String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; + + try { + baseConfiguration.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + firstCustomPeerConfigKey.concat("=").concat("")); + startMiniClusters(2); + addPeer("1", 0, 1); + Admin admin = utilities[0].getAdmin(); + + Assert.assertNull("Config should not be there", admin.getReplicationPeerConfig("1"). + getConfiguration().get(firstCustomPeerConfigKey)); + } finally { + shutDownMiniClusters(); + baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + } + } + @After public void tearDown() throws IOException { configurations = null; @@ -743,11 +796,11 @@ private void rollWALAndWait(final HBaseTestingUtility utility, final TableName t // listen for successful log rolls final WALActionsListener listener = new WALActionsListener() { - @Override - public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { - latch.countDown(); - } - }; + @Override + public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { + latch.countDown(); + } + }; region.getWAL().registerWALActionsListener(listener); // request a roll From 33441f540ae77a19a7115364964f630631d560e7 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 19 Dec 2020 20:28:30 +0800 Subject: [PATCH 277/769] Revert "HBASE-25368 Filter out more invalid encoded name in isEncodedRegionName(byte[] regionName) (#2753)" This reverts commit c3276801256aa16a62e5cdba7a37d4e18d59e880. 
---
 .../hbase/client/RawAsyncHBaseAdmin.java      | 87 +++++++++----------
 .../hadoop/hbase/client/RegionInfo.java       | 18 +---
 .../hadoop/hbase/client/TestAdmin1.java       | 19 ----
 3 files changed, 42 insertions(+), 82 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 7823963c4099..512e7a96aa6d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -2388,56 +2388,51 @@ CompletableFuture<HRegionLocation> getRegionLocation(byte[] regionNameOrEncodedR
     if (regionNameOrEncodedRegionName == null) {
       return failedFuture(new IllegalArgumentException("Passed region name can't be null"));
     }
-
-    CompletableFuture<Optional<HRegionLocation>> future;
-    if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) {
-      String encodedName = Bytes.toString(regionNameOrEncodedRegionName);
-      if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) {
-        // old format encodedName, should be meta region
-        future = connection.registry.getMetaRegionLocations()
-          .thenApply(locs -> Stream.of(locs.getRegionLocations())
-            .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst());
+    try {
+      CompletableFuture<Optional<HRegionLocation>> future;
+      if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) {
+        String encodedName = Bytes.toString(regionNameOrEncodedRegionName);
+        if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) {
+          // old format encodedName, should be meta region
+          future = connection.registry.getMetaRegionLocations()
+            .thenApply(locs -> Stream.of(locs.getRegionLocations())
+              .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst());
+        } else {
+          future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable,
+            regionNameOrEncodedRegionName);
+        }
       } else {
-        future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable,
-          regionNameOrEncodedRegionName);
-      }
-    } else {
-      // Not all regionNameOrEncodedRegionName here is going to be a valid region name,
-      // it needs to throw out IllegalArgumentException in case tableName is passed in.
-      RegionInfo regionInfo;
-      try {
-        regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName(
-          regionNameOrEncodedRegionName);
-      } catch (IOException ioe) {
-        throw new IllegalArgumentException(ioe.getMessage());
+        RegionInfo regionInfo =
+          CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName);
+        if (regionInfo.isMetaRegion()) {
+          future = connection.registry.getMetaRegionLocations()
+            .thenApply(locs -> Stream.of(locs.getRegionLocations())
+              .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId())
+              .findFirst());
+        } else {
+          future =
+            ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName);
+        }
       }
-      if (regionInfo.isMetaRegion()) {
-        future = connection.registry.getMetaRegionLocations()
-          .thenApply(locs -> Stream.of(locs.getRegionLocations())
-            .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId())
-            .findFirst());
-      } else {
-        future =
-          ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName);
-      }
+      CompletableFuture<HRegionLocation> returnedFuture = new CompletableFuture<>();
+      addListener(future, (location, err) -> {
+        if (err != null) {
+          returnedFuture.completeExceptionally(err);
+          return;
+        }
+        if (!location.isPresent() || location.get().getRegion() == null) {
+          returnedFuture.completeExceptionally(
+            new UnknownRegionException("Invalid region name or encoded region name: " +
+              Bytes.toStringBinary(regionNameOrEncodedRegionName)));
+        } else {
+          returnedFuture.complete(location.get());
+        }
+      });
+      return returnedFuture;
+    } catch (IOException e) {
+      return failedFuture(e);
     }
-
-    CompletableFuture<HRegionLocation> returnedFuture = new CompletableFuture<>();
-    addListener(future, (location, err) -> {
-      if (err != null) {
-        returnedFuture.completeExceptionally(err);
-        return;
-      }
-      if (!location.isPresent() || location.get().getRegion() == null) {
-        returnedFuture.completeExceptionally(
-          new UnknownRegionException("Invalid region name or encoded region name: " +
-            Bytes.toStringBinary(regionNameOrEncodedRegionName)));
-      } else {
-        returnedFuture.complete(location.get());
-      }
-    });
-    return returnedFuture;
   }

   /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index b6bdd0103de8..d7460e9d15ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -363,23 +363,7 @@ static byte[] getStartKey(final byte[] regionName) throws IOException {
   @InterfaceAudience.Private // For use by internals only.
   public static boolean isEncodedRegionName(byte[] regionName) {
     // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex.
-    if (parseRegionNameOrReturnNull(regionName) == null) {
-      if (regionName.length > MD5_HEX_LENGTH) {
-        return false;
-      } else if (regionName.length == MD5_HEX_LENGTH) {
-        return true;
-      } else {
-        String encodedName = Bytes.toString(regionName);
-        try {
-          Integer.parseInt(encodedName);
-          // If this is a valid integer, it could be hbase:meta's encoded region name.
- return true; - } catch(NumberFormatException er) { - return false; - } - } - } - return false; + return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index cfd61d295f89..a0ed836f9c75 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -99,25 +99,6 @@ public void testSplitFlushCompactUnknownTable() throws InterruptedException { assertTrue(exception instanceof TableNotFoundException); } - @Test - public void testCompactATableWithSuperLongTableName() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); - try { - ADMIN.createTable(htd); - try { - ADMIN.majorCompactRegion(tableName.getName()); - ADMIN.majorCompactRegion(Bytes.toBytes("abcd")); - } catch (IllegalArgumentException iae) { - LOG.info("This is expected"); - } - } finally { - ADMIN.disableTable(tableName); - ADMIN.deleteTable(tableName); - } - } - @Test public void testCompactionTimestamps() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); From 7b1e9cd0d2bb6bde4f813d5d53ff2b13b06847ff Mon Sep 17 00:00:00 2001 From: Akshay Sudheer <74921542+AkshayTSudheer@users.noreply.github.com> Date: Sun, 20 Dec 2020 02:07:21 +0530 Subject: [PATCH 278/769] HBASE-25404 Procedures table Id under master web UI gets word break to single character (#2783) Signed-off-by: Duo Zhang --- .../src/main/resources/hbase-webapps/master/procedures.jsp | 2 +- .../src/main/resources/hbase-webapps/static/css/hbase.css | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp index 8e1e23805abd..fba9a42b94e9 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp @@ -123,7 +123,7 @@
   <p>
     We do not list procedures that have completed successfully; their number makes it hard to spot the problematics.
   </p>
-    <table class="table table-striped" width="90%" >
+    <table id="tab_Procedures" class="table table-striped" width="90%" >
    diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css b/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css index d1cc0fecf233..2661c8d62229 100644 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css +++ b/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css @@ -39,6 +39,11 @@ section { margin-bottom: 3em; } margin-top: 1.2em; } +table#tab_Procedures td:nth-child(-n+7) { + word-break: normal; + overflow-wrap: normal; +} + /* Region Server page styling */ /* striped tables styling */ From 1540b89ceece9c2ebe10959a32519787cd39dc50 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 20 Dec 2020 11:26:36 +0800 Subject: [PATCH 279/769] HBASE-25420 Some minor improvements in rpc implementation (#2792) Signed-off-by: XinSun Signed-off-by: stack --- .../hbase/ipc/NettyRpcDuplexHandler.java | 28 ++++++------ .../hbase/ipc/NettyRpcFrameDecoder.java | 8 ++-- .../apache/hadoop/hbase/ipc/ServerCall.java | 31 ++++++++----- .../hadoop/hbase/ipc/SimpleRpcServer.java | 45 +++++++++---------- 4 files changed, 58 insertions(+), 54 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index 649375a89c1c..2a2df8a7ad4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -17,35 +17,35 @@ */ package org.apache.hadoop.hbase.ipc; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Message.Builder; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; - import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.hbase.CellScanner; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; -import org.apache.hadoop.io.compress.CompressionCodec; 
-import org.apache.hadoop.ipc.RemoteException; /** * The netty rpc handler. @@ -103,8 +103,8 @@ private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise p ctx.write(buf, withoutCellBlockPromise); ChannelPromise cellBlockPromise = ctx.newPromise(); ctx.write(cellBlock, cellBlockPromise); - PromiseCombiner combiner = new PromiseCombiner(); - combiner.addAll(withoutCellBlockPromise, cellBlockPromise); + PromiseCombiner combiner = new PromiseCombiner(ctx.executor()); + combiner.addAll((ChannelFuture) withoutCellBlockPromise, cellBlockPromise); combiner.finish(promise); } else { ctx.write(buf, promise); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java index 5ed3d2ef43f3..9444cd0dee99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.exceptions.RequestTooBigException; @@ -30,6 +29,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; import org.apache.hbase.thirdparty.io.netty.handler.codec.CorruptedFrameException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; @@ -124,10 +124,8 @@ private void handleTooBigRequest(ByteBuf in) throws IOException { RPCProtos.RequestHeader header = getHeader(in, headerSize); // Notify the client about the offending request - NettyServerCall reqTooBig = - new NettyServerCall(header.getCallId(), connection.service, null, null, null, null, - connection, 0, connection.addr, System.currentTimeMillis(), 0, - connection.rpcServer.bbAllocator, connection.rpcServer.cellBlockBuilder, null); + NettyServerCall reqTooBig = connection.createCall(header.getCallId(), connection.service, null, + null, null, null, 0, connection.addr, 0, null); connection.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index d20e28f8c786..a5c8a3920b17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -26,25 +26,27 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.io.ByteBuffAllocator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBufferListOutputStream; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.ByteBufferUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import 
org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.util.StringUtils; /** * Datastructure that holds all necessary to a method invocation and then afterward, carries @@ -217,10 +219,14 @@ public String toShortString() { } @Override - public synchronized void setResponse(Message m, final CellScanner cells, - Throwable t, String errorMsg) { - if (this.isError) return; - if (t != null) this.isError = true; + public synchronized void setResponse(Message m, final CellScanner cells, Throwable t, + String errorMsg) { + if (this.isError) { + return; + } + if (t != null) { + this.isError = true; + } BufferChain bc = null; try { ResponseHeader.Builder headerBuilder = ResponseHeader.newBuilder(); @@ -385,9 +391,10 @@ private static ByteBuffer createHeaderAndMessageBytes(Message result, Message he return pbBuf; } - protected BufferChain wrapWithSasl(BufferChain bc) - throws IOException { - if (!this.connection.useSasl) return bc; + protected BufferChain wrapWithSasl(BufferChain bc) throws IOException { + if (!this.connection.useSasl) { + return bc; + } // Looks like no way around this; saslserver wants a byte array. I have to make it one. // THIS IS A BIG UGLY COPY. 
byte [] responseBytes = bc.getBytes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index f3f78073dc5d..cbcbc9a8f7a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -40,24 +40,23 @@ import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.security.HBasePolicyProvider; -import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; +import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** * The RPC server with native java NIO implementation deriving from Hadoop to @@ -307,7 +306,7 @@ void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfM // If the connectionManager can't take it, close the connection. if (c == null) { if (channel.isOpen()) { - IOUtils.cleanup(null, channel); + IOUtils.cleanupWithLogger(LOG, channel); } continue; } @@ -416,10 +415,12 @@ protected void closeConnection(SimpleServerRpcConnection connection) { @Override public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } - /** Starts the service. Must be called before any calls will be handled. */ + /** Starts the service. Must be called before any calls will be handled. */ @Override public synchronized void start() { - if (started) return; + if (started) { + return; + } authTokenSecretMgr = createSecretManager(); if (authTokenSecretMgr != null) { setSecretManager(authTokenSecretMgr); @@ -433,7 +434,7 @@ public synchronized void start() { started = true; } - /** Stops the service. No new calls will be handled after this is called. */ + /** Stops the service. No new calls will be handled after this is called. */ @Override public synchronized void stop() { LOG.info("Stopping server on " + port); @@ -449,10 +450,9 @@ public synchronized void stop() { notifyAll(); } - /** Wait for the server to be stopped. - * Does not wait for all subthreads to finish. - * See {@link #stop()}. - * @throws InterruptedException e + /** + * Wait for the server to be stopped. Does not wait for all subthreads to finish. 
+   * @see #stop()
    */
   @Override
   public synchronized void join() throws InterruptedException {
@@ -503,13 +503,14 @@ public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor
    * @param channel writable byte channel to write to
    * @param bufferChain Chain of buffers to write
    * @return number of bytes written
-   * @throws java.io.IOException e
    * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)
    */
   protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChain)
-      throws IOException {
-    long count = bufferChain.write(channel, NIO_BUFFER_LIMIT);
-    if (count > 0) this.metrics.sentBytes(count);
+    throws IOException {
+    long count = bufferChain.write(channel, NIO_BUFFER_LIMIT);
+    if (count > 0) {
+      this.metrics.sentBytes(count);
+    }
     return count;
   }

@@ -523,22 +524,20 @@ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChai
    * @throws UnknownHostException if the address isn't a valid host name
    * @throws IOException other random errors from bind
    */
-  public static void bind(ServerSocket socket, InetSocketAddress address,
-      int backlog) throws IOException {
+  public static void bind(ServerSocket socket, InetSocketAddress address, int backlog)
+    throws IOException {
     try {
       socket.bind(address, backlog);
     } catch (BindException e) {
       BindException bindException =
-        new BindException("Problem binding to " + address + " : " +
-          e.getMessage());
+        new BindException("Problem binding to " + address + " : " + e.getMessage());
       bindException.initCause(e);
       throw bindException;
     } catch (SocketException e) {
       // If they try to bind to a different host's address, give a better
       // error message.
       if ("Unresolved address".equals(e.getMessage())) {
-        throw new UnknownHostException("Invalid hostname for server: " +
-          address.getHostName());
+        throw new UnknownHostException("Invalid hostname for server: " + address.getHostName());
       }
       throw e;
     }

From 772350b26e9db75bf5b7c7bc59e817a3e39d637b Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Sun, 20 Dec 2020 12:00:07 +0800
Subject: [PATCH 280/769] HBASE-25419 Remove deprecated methods in RpcServer implementation (#2791)

Signed-off-by: XinSun
Signed-off-by: stack
---
 .../hadoop/hbase/ipc/NettyRpcServer.java      | 23 -------------------
 .../hadoop/hbase/ipc/RpcServerInterface.java  | 20 +---------------
 .../hadoop/hbase/ipc/SimpleRpcServer.java     | 23 -------------------
 3 files changed, 1 insertion(+), 65 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
index f34cad5f60c3..1d3981f78846 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
@@ -23,22 +23,16 @@
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.security.HBasePolicyProvider;
 import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
-import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
-import org.apache.hbase.thirdparty.com.google.protobuf.Message;
 import org.apache.hbase.thirdparty.io.netty.bootstrap.ServerBootstrap;
 import org.apache.hbase.thirdparty.io.netty.channel.Channel;
 import org.apache.hbase.thirdparty.io.netty.channel.ChannelInitializer;
@@ -182,21 +176,4 @@ public int getNumOpenConnections() {
     // allChannels also contains the server channel, so exclude that from the count.
     return channelsCount > 0 ? channelsCount - 1 : channelsCount;
   }
-
-  @Override
-  public Pair<Message, CellScanner> call(BlockingService service,
-      MethodDescriptor md, Message param, CellScanner cellScanner,
-      long receiveTime, MonitoredRPCHandler status) throws IOException {
-    return call(service, md, param, cellScanner, receiveTime, status,
-      System.currentTimeMillis(), 0);
-  }
-
-  @Override
-  public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md,
-      Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
-      long startTime, int timeout) throws IOException {
-    NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null,
-      -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null);
-    return call(fakeCall, status);
-  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
index e06daacf5fee..ee6e57a2a9f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
@@ -25,14 +25,12 @@
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.yetus.audience.InterfaceAudience;

-import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
-import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hbase.thirdparty.com.google.protobuf.Message;

 @InterfaceAudience.Private
@@ -46,22 +44,6 @@ public interface RpcServerInterface {
   void setSocketSendBufSize(int size);
   InetSocketAddress getListenerAddress();

-  /**
-   * @deprecated As of release 1.3, this will be removed in HBase 3.0
-   */
-  @Deprecated
-  Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md,
-    Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status)
-    throws IOException;
-
-  /**
-   * @deprecated As of release 2.0, this will be removed in HBase 3.0
-   */
-  @Deprecated
-  Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md, Message param,
-    CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, long startTime,
-    int timeout) throws IOException;
-
   Pair<Message, CellScanner> call(RpcCall call, MonitoredRPCHandler status)
     throws IOException;

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index cbcbc9a8f7a8..38c771277360 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -41,22 +41,16 @@
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 import org.apache.hadoop.hbase.security.HBasePolicyProvider;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.yetus.audience.InterfaceAudience;

 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
-import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
-import org.apache.hbase.thirdparty.com.google.protobuf.Message;

 /**
  * The RPC server with native java NIO implementation deriving from Hadoop to
@@ -475,23 +469,6 @@ public synchronized InetSocketAddress getListenerAddress() {
     return listener.getAddress();
   }

-  @Override
-  public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md,
-    Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status)
-    throws IOException {
-    return call(service, md, param, cellScanner, receiveTime, status, System.currentTimeMillis(),
-      0);
-  }
-
-  @Override
-  public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md,
-    Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
-    long startTime, int timeout) throws IOException {
-    SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner,
-      null, -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null, null);
-    return call(fakeCall, status);
-  }
-
   /**
    * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}.
    * If the amount of data is large, it writes to channel in smaller chunks.
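
A side note on the PromiseCombiner change in the HBASE-25420 patch above: Netty 4.1 deprecates the no-argument PromiseCombiner constructor in favor of the variant that takes an event executor. A minimal sketch of the pattern, assuming plain Netty 4.1 on the classpath rather than HBase's shaded org.apache.hbase.thirdparty packages; CombinedWrite and writeBoth are illustrative names, not HBase code.

import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.util.concurrent.PromiseCombiner;

final class CombinedWrite {
  // Writes two parts (e.g. a request header and a cell block) and completes a
  // single promise once both underlying writes have finished.
  static void writeBoth(ChannelHandlerContext ctx, Object header, Object body,
      ChannelPromise promise) {
    ChannelFuture headerFuture = ctx.write(header);
    ChannelFuture bodyFuture = ctx.write(body);
    // Tie the combiner to the channel's event loop, as the patch above does.
    PromiseCombiner combiner = new PromiseCombiner(ctx.executor());
    combiner.addAll(headerFuture, bodyFuture);
    combiner.finish(promise); // fails if either write failed, succeeds otherwise
  }
}

The explicit ChannelFuture cast in the actual hunk likely exists to steer overload resolution toward addAll(Future...) and away from the deprecated promise-taking overload, since a ChannelPromise implements both.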
From 51cee0016f0caa76e4eaa9b44d45705908e79938 Mon Sep 17 00:00:00 2001 From: scotthunt Date: Mon, 21 Dec 2020 01:39:13 -0700 Subject: [PATCH 281/769] [PATCH] Add "regexstringnocase" to ParseFilter for case-insensitivity (#2784) Signed-off-by: stack Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/filter/ParseConstants.java | 7 +++++++ .../apache/hadoop/hbase/filter/ParseFilter.java | 4 ++++ .../hadoop/hbase/filter/TestParseFilter.java | 15 +++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index 9f52783dbb0c..b9132a3ba295 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -244,6 +244,13 @@ public final class ParseConstants { public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', 's','t','r','i','n','g'}; + /** + * RegexStringNoCaseType byte array + */ + public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', + 's','t','r','i','n','g', + 'n','o','c','a','s','e'}; + /** * SubstringType byte array */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index 1aeaa13f5a93..e06c6b5c4139 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -28,6 +28,7 @@ import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; +import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; @@ -812,6 +813,9 @@ else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) return new BinaryPrefixComparator(comparatorValue); else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); + else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) + return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), + Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java index 8dd15be6fc63..fbedc1c0e688 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java @@ -24,6 +24,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; +import java.util.regex.Pattern; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -279,6 +280,20 @@ public void testQualifierFilter() throws IOException { assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); } + @Test + public void testQualifierFilterNoCase() throws IOException { + String filterString = "QualifierFilter(=, 'regexstringnocase:pre*')"; + QualifierFilter qualifierFilter = + 
doTestFilter(filterString, QualifierFilter.class); + assertEquals(CompareOperator.EQUAL, qualifierFilter.getCompareOperator()); + assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); + RegexStringComparator regexStringComparator = + (RegexStringComparator) qualifierFilter.getComparator(); + assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); + int regexComparatorFlags = regexStringComparator.getEngine().getFlags(); + assertEquals(Pattern.CASE_INSENSITIVE | Pattern.DOTALL, regexComparatorFlags); + } + @Test public void testValueFilter() throws IOException { String filterString = "ValueFilter(!=, 'substring:pre')"; From d13010fa260ac313b027333ec6c8a4c847fa3e6e Mon Sep 17 00:00:00 2001 From: Wellington Chevreuil Date: Mon, 21 Dec 2020 08:41:52 +0000 Subject: [PATCH 282/769] Revert "[PATCH] Add "regexstringnocase" to ParseFilter for case-insensitivity (#2784)" Will re-commit with the proper jira ID This reverts commit 51cee0016f0caa76e4eaa9b44d45705908e79938. --- .../hadoop/hbase/filter/ParseConstants.java | 7 ------- .../apache/hadoop/hbase/filter/ParseFilter.java | 4 ---- .../hadoop/hbase/filter/TestParseFilter.java | 15 --------------- 3 files changed, 26 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index b9132a3ba295..9f52783dbb0c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -244,13 +244,6 @@ public final class ParseConstants { public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', 's','t','r','i','n','g'}; - /** - * RegexStringNoCaseType byte array - */ - public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', - 's','t','r','i','n','g', - 'n','o','c','a','s','e'}; - /** * SubstringType byte array */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index e06c6b5c4139..1aeaa13f5a93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -28,7 +28,6 @@ import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; -import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; @@ -813,9 +812,6 @@ else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) return new BinaryPrefixComparator(comparatorValue); else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); - else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) - return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), - Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java index fbedc1c0e688..8dd15be6fc63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java @@ -24,7 +24,6 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; -import java.util.regex.Pattern; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -280,20 +279,6 @@ public void testQualifierFilter() throws IOException { assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); } - @Test - public void testQualifierFilterNoCase() throws IOException { - String filterString = "QualifierFilter(=, 'regexstringnocase:pre*')"; - QualifierFilter qualifierFilter = - doTestFilter(filterString, QualifierFilter.class); - assertEquals(CompareOperator.EQUAL, qualifierFilter.getCompareOperator()); - assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); - RegexStringComparator regexStringComparator = - (RegexStringComparator) qualifierFilter.getComparator(); - assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); - int regexComparatorFlags = regexStringComparator.getEngine().getFlags(); - assertEquals(Pattern.CASE_INSENSITIVE | Pattern.DOTALL, regexComparatorFlags); - } - @Test public void testValueFilter() throws IOException { String filterString = "ValueFilter(!=, 'substring:pre')"; From 904b555edcdbcc63926c5c01138afa55107dcbc2 Mon Sep 17 00:00:00 2001 From: scotthunt Date: Mon, 21 Dec 2020 01:39:13 -0700 Subject: [PATCH 283/769] HBASE-25084 Add "regexstringnocase" to ParseFilter for case-insensitivity (#2784) Signed-off-by: stack Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/filter/ParseConstants.java | 7 +++++++ .../apache/hadoop/hbase/filter/ParseFilter.java | 4 ++++ .../hadoop/hbase/filter/TestParseFilter.java | 15 +++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index 9f52783dbb0c..b9132a3ba295 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -244,6 +244,13 @@ public final class ParseConstants { public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', 's','t','r','i','n','g'}; + /** + * RegexStringNoCaseType byte array + */ + public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', + 's','t','r','i','n','g', + 'n','o','c','a','s','e'}; + /** * SubstringType byte array */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index 1aeaa13f5a93..e06c6b5c4139 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -28,6 +28,7 @@ import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; +import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; @@ -812,6 +813,9 @@ else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) return new BinaryPrefixComparator(comparatorValue); else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); + 
else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) + return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), + Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java index 8dd15be6fc63..fbedc1c0e688 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java @@ -24,6 +24,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; +import java.util.regex.Pattern; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -279,6 +280,20 @@ public void testQualifierFilter() throws IOException { assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); } + @Test + public void testQualifierFilterNoCase() throws IOException { + String filterString = "QualifierFilter(=, 'regexstringnocase:pre*')"; + QualifierFilter qualifierFilter = + doTestFilter(filterString, QualifierFilter.class); + assertEquals(CompareOperator.EQUAL, qualifierFilter.getCompareOperator()); + assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); + RegexStringComparator regexStringComparator = + (RegexStringComparator) qualifierFilter.getComparator(); + assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); + int regexComparatorFlags = regexStringComparator.getEngine().getFlags(); + assertEquals(Pattern.CASE_INSENSITIVE | Pattern.DOTALL, regexComparatorFlags); + } + @Test public void testValueFilter() throws IOException { String filterString = "ValueFilter(!=, 'substring:pre')"; From f8bd22827a51811c0e807fc5f16195ed9b4532d1 Mon Sep 17 00:00:00 2001 From: Lokesh Khurana Date: Mon, 21 Dec 2020 15:33:36 +0530 Subject: [PATCH 284/769] HBASE-24620 : Add a ClusterManager which submits command to ZooKeeper and its Agent which picks and execute those Commands (#2299) Signed-off-by: Aman Poonia Signed-off-by: Viraj Jasani --- bin/chaos-daemon.sh | 140 +++++ ...hich_Submits_Command_Through_ZooKeeper.pdf | Bin 0 -> 270679 bytes .../apache/hadoop/hbase/chaos/ChaosAgent.java | 591 ++++++++++++++++++ .../hadoop/hbase/chaos/ChaosConstants.java | 77 +++ .../hadoop/hbase/chaos/ChaosService.java | 138 ++++ .../apache/hadoop/hbase/chaos/ChaosUtils.java | 49 ++ .../apache/hadoop/hbase/ChaosZKClient.java | 332 ++++++++++ .../hadoop/hbase/ZNodeClusterManager.java | 120 ++++ 8 files changed, 1447 insertions(+) create mode 100644 bin/chaos-daemon.sh create mode 100644 dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf create mode 100644 hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java create mode 100644 hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java create mode 100644 hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java create mode 100644 hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java create mode 100644 hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java create mode 100644 
hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java

diff --git a/bin/chaos-daemon.sh b/bin/chaos-daemon.sh
new file mode 100644
index 000000000000..084e519321a2
--- /dev/null
+++ b/bin/chaos-daemon.sh
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+
+usage="Usage: chaos-daemon.sh (start|stop) chaosagent"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+  echo "$usage"
+  exit 1
+fi
+
+# get arguments
+startStop=$1
+shift
+
+command=$1
+shift
+
+check_before_start(){
+  # check if the process is not running
+  mkdir -p "$HBASE_PID_DIR"
+  if [ -f "$CHAOS_PID" ]; then
+    if kill -0 "$(cat "$CHAOS_PID")" > /dev/null 2>&1; then
+      echo "$command" running as process "$(cat "$CHAOS_PID")". Stop it first.
+      exit 1
+    fi
+  fi
+}
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=$(cd "$bin">/dev/null || exit; pwd)
+
+. "$bin"/hbase-config.sh
+. "$bin"/hbase-common.sh
+
+CLASSPATH=$HBASE_CONF_DIR
+for f in ../lib/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f
+done
+
+# get log directory
+if [ "$HBASE_LOG_DIR" = "" ]; then
+  export HBASE_LOG_DIR="$HBASE_HOME/logs"
+fi
+
+if [ "$HBASE_PID_DIR" = "" ]; then
+  HBASE_PID_DIR=/tmp
+fi
+
+if [ "$HBASE_IDENT_STRING" = "" ]; then
+  export HBASE_IDENT_STRING="$USER"
+fi
+
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-$command-$HOSTNAME
+export CHAOS_LOGFILE=$HBASE_LOG_PREFIX.log
+
+if [ -z "${HBASE_ROOT_LOGGER}" ]; then
+export HBASE_ROOT_LOGGER=${HBASE_ROOT_LOGGER:-"INFO,RFA"}
+fi
+
+if [ -z "${HBASE_SECURITY_LOGGER}" ]; then
+export HBASE_SECURITY_LOGGER=${HBASE_SECURITY_LOGGER:-"INFO,RFAS"}
+fi
+
+CHAOS_LOGLOG=${CHAOS_LOGLOG:-"${HBASE_LOG_DIR}/${CHAOS_LOGFILE}"}
+CHAOS_PID=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.pid
+
+if [ -z "$CHAOS_JAVA_OPTS" ]; then
+  CHAOS_JAVA_OPTS="-Xms1024m -Xmx4096m"
+fi
+
+case $startStop in
+
+(start)
+    check_before_start
+    echo running $command
+    CMD="${JAVA_HOME}/bin/java -Dapp.home=${HBASE_CONF_DIR}/../ ${CHAOS_JAVA_OPTS} -cp ${CLASSPATH} org.apache.hadoop.hbase.chaos.ChaosService -$command start &>> ${CHAOS_LOGLOG} &"
+
+    eval $CMD
+    PID=$(echo $!)
+    echo ${PID} >${CHAOS_PID}
+
+    echo "Chaos ${1} process Started with ${PID} !"
+    now=$(date)
+    echo "${now} Chaos ${1} process Started with ${PID} !"
>>${CHAOS_LOGLOG} + ;; + +(stop) + echo stopping $command + if [ -f $CHAOS_PID ]; then + pidToKill=`cat $CHAOS_PID` + # kill -0 == see if the PID exists + if kill -0 $pidToKill > /dev/null 2>&1; then + echo -n stopping $command + echo "`date` Terminating $command" >> $CHAOS_LOGLOG + kill $pidToKill > /dev/null 2>&1 + waitForProcessEnd $pidToKill $command + else + retval=$? + echo no $command to stop because kill -0 of pid $pidToKill failed with status $retval + fi + else + echo no $command to stop because no pid file $CHAOS_PID + fi + rm -f $CHAOS_PID + ;; + +(*) + echo $usage + exit 1 + ;; + +esac diff --git a/dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf b/dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fe35c04ebbc369b322bbd7a92a5f1cdde157976a GIT binary patch literal 270679 zcmd41W0WObw=P&|tJ2y$ zsee*AeFtktz+VsqZLOTGZ5#nC06IAbV6&yNoA%G^A ziGc|-6T7i711qZ$i@u2wGb;-{6FWV-@ef00Mpk1Mp8sbF?Y}B;baF7(w}y7hG&C^M z+cDKM(9`=6?_K|AE5)SNQ(_RfhhT z=Kq6Na{8vmj(?5hUzYKYaSG}?>08;F{!esa`1|}nn*47~RT04U$14AA&af(Xu*yZS0kCr4OzP=tfYYcEf;c1Gw;1TJwKaNLH?PN7=kKDtX{a3Qj+w?C zNXdE*F%bC>BQP-*F`qFvDm?nYFw|_xt|>e9$;+t?GJy{qimBz~x(5-5ioPj8Qc7M#z_AU7SewRrY~UomqveU zSpV>h?GID`i2fnuAIbjBgH_=2{08D?U zqVO-eiu@VwUlJk=^Z;h&e})ucU;r@x^?H9uD#8q4`nOg@SOLuco|Fh1fSKXn0zUxE zjQ`HE1DKiqmHq1pi>5dIp+|6~RKLgxS03jVEsCU(~UY5*77t5pY0NZ#GLv(rF?j=Fw; zo()W0UNQ^-e(VZ3@DIMsF9?CgLbTNbb7k{0JVUM_>bXM|;&OOdRLNVyxoY0=`;3_1 zx6|8OpKnXN_6pkg@1J+;yq``lvL0*ho*%2H)At3pb6>A-vMIZC-CwVd{j#sOXJ1t; zpr^D7YRjrO1YTjuz=UujOyX3qo zNV=O2eRQO#nifRwxZ_xR8Cj+r`u;lHk+DE~zm=G66v@-*K(d zygs74qHi_lOi7MD@jeu!bt3WHI#F_lO~O$f)nU8uLmL*qOyHt1fS62jmDWf;oKyg3 z*1K9uJ8NED*|ds{aOC>E@H`+{V^Hdu)wAGLXPb4erZ(}y=(oCpY4g|+OCD!sfKpTX zNZWW#*h`QO=t6bvFxByjM~+gKbeqlirNwdpt9-3my|C3*eejiWS2)jhnM3J&MM&hh z5KeyVQ&JJbbri7u!c@2n`hwmSh1TTLdLg7G8NOY@Cd1A-%GOaxDy9rBxJ{b_0?IiM=HP$$vZ$S~`0 z)W<^lz1Br~xstw4PV=e|nSzHFx&E4Q=+Pqj=tJxk9u=V)!&XvCsq4Vj0q>#ucc?D^ zHpFNVUw+|*n4Yz&R1+?sL7RN%R<&<-CUwHJ;Qp%d^%%fQQIbh&TqGo%Ffn0O>z#$ooB+XT$8VzXKoOa|YjkVC{Gefn)Gm9GM zy0jPguIx(s8Pz!TVB6f#4*0k$K+HX3e`WS6gho^itOn-`QiF!*r3POYsz5K^V_v{f zAn1m36bd?`d@xw+#Cl%%*@*`_uoiy-Z-Cpcxq4{e%~F;aHF61%RVMbSLW-Bju$w#N z_eIrp5kU|6r^&dbkuKoN&9{tTOZUiwUIRZZIhG6b`|ac1C=`&kYTAmvEn$Frz7nRx zEeyCRiF?vQ9%~ftID-ujL0>lWHTgH3y=J9XAEXiH$D$Qh=4V!=r>Qy3uIed`EF6s# z3{QDlc4i+kC^Rgi%N!pQg-U?R863<(K#iSOu+?n`PAqm57D7z#ziSOxwcSOGp>K#Y zY7{pX7T8WJNOvb339ZlkV$m&-F_JRtTKs<9vV0r$9S9N@m3%FF)xY-}e?IUFDIIh! 
zZPakakix(%ALS{?$SC*>Md1dta^_DEl|(8qL#hrMWsZIDD`7AzZ0FIY!du|MJJ}ug zE%lN$?}sXftOejmqd5SPdg$ba3M?{G1=OU+%;bGzxD48J zgXk0nixm;h!2eBTl($NWp($S68vGlQl2rnP*o?))6;BpKTx`VRYFSzDCO0oXGY;A_ zNag#Qnwd+F$1aF2Gqc(}3Pvv<{YWvPxnofojFFkp;8r}D)4|l}ogQzquMgdv+32!PL3&sWd8X_Hn0bVNx*tq_l=r&9xFf z9YpXR5RxFH8p?ro{vROnFnEwhDUJT zbMyvtlxVi*?rW5o5IG!}26J9IYA)dlFtJ^%$zi@rBtbQL_bJ!0Coa3DZBrk+88ioJ z63Tt$dA`NY35y0Cylh{q<6xP-k{nPi(}ZU$G#>Dz=>{-ovW9yOr5|PTDPAohzk!LS zgPV@z_+H+o6inw@jOg=av2S~&Xiq;}NW!RS;18VXMGUcP*@!b#0%J3N+ESGC@3{-1 z9!d-%!TZaVP}AElBpGy`#K$Y!k7wJe)b-IRTA?ckPr|luEze#m0|meTNb71f6o`@k z;f&}a{SEO!nie6VZ8Tk{+8RD2S^QVlqIZe~s^j+>WyShbg#*tmamq-M`C9MQ4d9m3 zE9%QYeT=wP!Z>6%9w0hI&TDlJmz{l(Lg+Zm47N?W1IJ)N z%%QxxP=D=7`UYe|De{dE04y@P0*>eU$?~R~mdiD+kp9RH!Nt#ohjc^+v#6FHJ<+gQ z3VKT29Xra@F8jy?q_D~MGN^U`_G(t}58OVPOVscl2muIkOo`A(hfz9>kD#d6{Io+} zbh{R8u=L@yW1AS%1?UY$t6{P((r@cKAjG6_G}(TFEx?QLK61PM(qPJRaL-QWP<$B&k)PO^&~(e&yc4I z2l{F;&Kf!rcnzOE+wC{ZG3jn;A1cD`G3o^Tzx8 z%5os^j0PqY8wUIb7hpcg+IpjSZI+Sqowyu8HxDD@5u68$4!E@i`e?D}p$~gUxa?69 zN_`J%m9Dx|0$k`-nh!WFK9=bF+wz{vk|2O{l7QN+4tT)QRqVTy7;VCLNA7NJI#?cJ zTjg73F`0HBA*chtRMb2Ju^RxjNUMN={x<@@y)EA=fr9BqRyT=C(m;F$S0Mw;qJwimv(z@PLw>iA6kZ!%P| zI6TE`TN?Z*Tz+8jngejaB5L|bp6zwD#F6N>G}dgtz3F{BIRWd2Gl^}SGl#O!rp2*|b*_&O++d*J z7QsbnjopSgUboBBsHnz2tg!OPYI`;#L@&t9NO}h{FkcxRxmgA4%>Z#AoY@dO7qdGs z`$1YwBN1UImk$ITHP2I!Z1vU3jzrEN@P3_wd`Ybk(1^ugL8kSrVDma=Q8Xq4Y~W+@ z=(&WmM;|@y?XyKt8*|OLocrlcklFsZhc}7mAa*Yz(_?v(u9U^F_8MV*U-}A4LPWt? z6Jl$ZmVw;KVb?d&JD)P~drgFYIaKM8%*#FJ{Fy(%V54z57xNA(8rejK=j(l=TBm^{ zQ5v$rb#O}3X#O`#%I;T}?)>&yV+6Xz%sgz@lf#wphGmM+)+@`}6K&@k#usiV@8s~c zSArSay%rR^HhDUPUD+j+!5$A@EaLlB5e<$9_Bm+B>G)R{_7{^g)k-3Q3w9#0-nv*a zx49E5O}|VWNd1&gxxG@g8;q&quf7OJsK?`4rq&)l9v5IFo4V*EA0BYFUZ>_eWMGFB z@TcPM6Y1_k&xge$AYCb$efF);2li7Zp0!kV#&ljhmC1OV+WXKlao9iGmYcVdDZ(&{ zuX-ljPP8yw9Qm^lMM9Av*6{;&zui;hfwE#q38aHWM0}Wk_n9FDHhrJA%X|Z3)!5^X z0HbDrh6Ze0G13Srxpb#G540rAZRA_ht`nARZ8n28ra~3U_anr+=KCp`5xsbQ>CngE zdLZ;7LXIALVzq>0#>hH{6{L1Yg5@jAaO!D7(!#Td#o&3u#z8{@Z899wIqILM&|^wy zN`l_&`JL^bnVEW}^-1X#Lr#ZZK#?|)1|%!Gbz>m&I6_?c8a7=7O?Os8QVjCY`<%xg z!s8+Y9oUtKBMH>fxL6z7uq}{%rckBkNc995>vRn^`|Oe+m}n%}Qq5i{Qt+ACt?q+= ziCp&lfpcULvpwKw?V9$C>n zRp~PDJEL+foe@3Y@*39djiIe&T&I5XN6@9q4czk4h43dscf;}{Pekx}j&^@!D|l*m zjN~P+iu>jA`p&#g^zM*T;l^F^5>3{vF#X253rz-_A!`~Mt4D{%afX)z zWrlJN7434>rsw2)>L9{c0{qME?y9?RI|)JY@r`;5Bxl_Rhn2VL`kan40R^(xsrFO~ zOW%SYsEO{U^rAyIspwo3dnWKv%1^CF!^rGoltIs%hW@77s-JrvOVQI%5fMGEPrhXJ z>L33t8ulj^`kyh8|7|?ypPl^QaKxX`!+*qe7}?pG|4U$JFvZz9*EHEEew2{_ zQZb9uCSi`)KsPXMLWnR~XeLe0#GtMb%3L5QKM%#Q)TT{gQQG_=q)C8HNx8JiNl;Q* zv%ySRe(knnx|7N4n)a*9^~meWJ=t^0aY}p29q>&J16qo*HJH4V4v{D9f@0r-Ou|Jq11tG#1%gDmC<)@Yy9SHC)+$r!cX_D* zmC(1@l0_W}s^Y|oq~u!epc@u)Txz$y6{6AYJY*qgwKY1lgl?m$XflybS+yl z3^%-!Cm7O4kVfLz@@3G8fqRLtPo6hLbDx9lxUkito%gu_BHyA%cu5vOd;6Yx3QUM3 z?b|oOHNAvk!fbGggFq9FInD(GlRkqVCdie=kIY=KJ*#BMl|FCIEC~D&a%3jnoey@0 z@_i;kCZ9xuNwZgkOwaf;AOkQvZ5GAqgu^CivX(mhV@QZ&>@H1+HxJIH2%o^R1MK1i zf_RUWBY>EA{;4I&9m6_KVml9ajmcIJf+NVtsW4EC-}Du&^h97gm%EK|ekKY&7o=_8 z$4LlbEmY=2T(^M9gP}c_%{#bCtb}eB&ie$$NmTb0hkGXGF`o=)LT4nG`y@*0753C_ zw&Wy``W4hWZt{G10}jIB#PEKYZ!H)WHa}}*;bcJX6_OS<4|hZW&pQlg;$lRE$J-AF z))0QcZY@MBGWUrAlE|D2ykAh09yE9&Z(?$!pWFlCr6q7{kUlccRW8^5$I#q51`vK{ za2$EQbW2t%l1*fRh@0F5wGfnb4|(ET21b;F2Z-yAj0divTrbYJ*0k)An+60vsT@dz zoG|o|`F%mUjv#}D8I1&eoQ^QX9cp4;@=`8NLYixSw8}oMR9=d_zo!{0B}0@>pPQ7a zOi$-MVVRsP-Hy#f;jw&_`aO$-IPAAC3oFV@r;T9^=s*)31ImjcAJ0>C6bQ z2G}S1oFQiF{1STHpQqvzJ>QQyC&->By~6N@rMRSG^wIE&idtm%t*DDc86aY2iCXYw zXZlmey&&(diMhjG+|n_77&QXGCLn4M$IThgcGWe42GvHr{8|wuuW@L40^Q<(-5}{^ zy|^lFkSg-s-B8NSX_F1$He-fU9j|}r6s@`eTc*kGv9uw`GMMg~GtWp-_j{BLJ9JEW 
zp_8!+zymu>6q`!Did@r^N35A-fL zpLB~w1i$`S{KLO9gilAjq_a{Ld*JcUbSFeMs@lfcE((IlIlp!;U1Q+Cv_{ ziFIwuq=i-<6mv_c&ye>}MbKz8#Z~5pi2p7FlU>#j8X!ORE{Y`1XNg5}RzjeVHUo%K}kE8Kdkf-XcVVI8zWk#XpHsC~| zm3mBPjv>9cO*wTmrEZhus(`)-hNfYEN?$JGEqeo(& zw^h7x7O2F@@kNERrA{o^N6*st_!iz6Uagb z+f`3CMbUVzeh?(s`}g+xitT-S3V-G?m19z~ChgH)&i;g#v8=ZMe)y5jYLbO7z9*7v zY6{fKCbQ^(uY2Ka)L;@b92U3~pwZyqVdAPC9Ox~d{KZ9=kD!C5F_wdT(Y z?s$*Bsgjp^W9~uPDYKvCIBIkpZ1s*4Z(`c%bOrqv-Njs8`u!qCz$WPvaUOpb$1`#nk zn0+Mch--%K8ysZV5A^?{4O##H(1wI8j9mYVWT=}k^N&CYJAC6AiQ6dxUQ}@Plc;%C z68Bl$*!M?fI{iGPq69+MSKyZGJaNeB$?J#FWs%kSaAseUyFdjx$W6==e#zZ2+AWMu zjA_R1dYJ>oXGMWCZN$D?E(mPQmAD#eHU+_*8?@hXWe`5YEijRzwjam)sM2`}U}rz= zJNSeBHaUfCWmdd2ZMxeH^cKwY0x;r@a_qYJY3ef#-zGR+hRtS`u2b6Q{tR6uaX{^P zhpRr;ckcg^sFJ4V>x$pV!NemgrbSO0E8!&XLd;EUYgP35yT?6jt9Iw$jdrj}CjKTO zn{+M}mW5d+ft@s-4cr;}`jja^NYY7jub!;E=-lZ&fDSDqiFfCq9_2-=yrURzVimtz zjpN2R%dus3r?_!=!1&Y7CCX||fG;d3?*9g#?f(hBil>7qA%nb;m9mR1A%l#mowEG4H(8W~L)Y#tS-<@)V%>Qk2a&{qPX8hlH&@D}A`wakM&x`ux z99Q_}3w;#fZvsc7CONnXOZZUuXyCXFsS=_k>ED-}0+VZ{0uyalqI^W!h>!G)oROEQ z@%bO1mO@VP@J$dIKiqLqLHk)LanT`?f0*9N2g+emf~OuJ!^zEqkpco6`r3hb@NGq! z$t2YvQ@?SBg`5$E3!#4sYod}0VMfhBiw?LJYULZ*3R-!!lQXkD1!82 zqk{n{p@o3b2|W-xsCYz*0YlZ$lo|pADWQaBp;svh9i)T~(wm51&bjx#ch2|j7~l8a zz5f{h_{YDq$JlGFtTpFcYp=cMoNLWF_lzdA5whx3cspkwsni>MfjO9yzmj86w4r^$ zVprrx7_4Vt243AM1@}}Q9Plgpj4;eE52@2=9Dz0lTmHfWszZT#dtJ}>Gly5xqqivN zDcjGJ?Lt*&eqLD&J-fNE64QD(>z$s`3}0NtvnJvi_ zj?Nk#yK29Q{Y^kS)?lEyRV;Io)JCs_2)_|iw4D?2?3;>iwe6X;%-n%!8Dp%0e#doe z=%e3I%;-0#&oHN#yh60mJ?LorCim^ZNZSqIn-<#YO_fZCwrfF2$}(Fb=}xKG`tYb( zTcu8~$0|kThnY{eYA8L+Hy7Ia9U8Em6ypb8LEds9*oXZZn`R^GKQA#bIUM)dwKTpz z3*N?A-~QOQnRAS(fB#O5t=vuj(=(4ns)mU#i`M5+XSdvNcpg(#A@-a7dM}3 zH8-rjMUtbHA4IvF%rFnQ(RHLsA#M9-G{^VqLV4S(dvmAg@L!4qZW|B{0#gAD*oNA3ZxIViTK0v#vAbOB44< zCd;);oS&gn-@?wa?XerU1D9^wy?9pap2MAFH;>Jy(~oVP4!?tIoRRWX)u<~F;rZl` z(N1h&O4p_~h?o9>1W>@uLGz7OWu|Ya!8JkB)0ozZ&mgNg^4DGF`Q7kKi3Uf=>iric zoDw`Z0?j4c*!8T*FiV(I8KW@Qclg*|X-49uvNz22aUYuWl~`z66Hfo)vef9}Shtia zl)Th(MEbB%fqDj()z#0Rd$oIScY0CgFms?FR8}dp-ZjDyO@5<(WA2=w)nE>@)9}?2 zVIqs@q4C0ib&HLVNpUspi{&H`y?)q`M$+MOCJDtjxgGK>OlUTsM2B;w-sgO8OT5U6zl5;^d7li9AAcM zKV|^78eZ?@NrdVZ1Jt>V^JfpwtkTfX^QBgev}HmM=i;qsD=%f3qX zIr47iSI+!en5kQnjQPe;!v8ay$JrW#&slq3+X&nlT8zX#3KoIuy=WU z-!p&*O3mNok&}|N_e2k!0}I|m;g6%`&yjR0RQV_zzF z`6H)wi|3a7AJuN(rg9xBia&}qslkk8ewP=hcr>M?cw~NOU%#hicy9e2s-_~3%-<;a zz2!rMr=78bFO?R_hw7Rg%6Ju71|#R^C1=3?}7<+lss+W41W%&ec#2`=b-~a&C}h> z)8qHPEAU9FdAfNbjJ+P)Q8}o(gP)6?gOL`MmOqM35RW~496b^4RJ8xWbJf*M{=spz z9bBB8sZivksnj5CegDA!M{oW?%s)CHY35>2<=*m&GXKHJWp3S+{<8%2YxvhUz-3)c z9ZdiY4FEtx{Q-WB0n`9zPSc&CJALL1-5GlNGiMntGccSx$H02=()r8mtXHnEv$C;q za0^`L;N;_CW8)Fy;S&@R5f!<5UHqoF@J#_>5#ir~(9qMq(NcTEM9+LfiiP!_;aMOXJFm3qV{cz{Vp*T8ocw(g2;y0M!uwJ_ z{#!<7$nsBW#saqC>KmI>aIAku^EY(Amoohc`QJ-N0q1F{gfY=F0qy{P%Kd}1|L3nX z6rj3M4U`py$oJJ)rN{n`kv<0Z*sVLNUzXqB@jLF>DgbavAurb2^TOgfq<<+`WbrR# zUEyjkUXwXP_6>ob2osWSqVw_7pt`Y+Z0TN#C#7t3P7+{HEebDAG}JaSlm^XYVJ zUZ|ZzMh^`RcFYIh`}u%l9#xwn-81P{H69k8O~PiFw>BrChCzHlrI7lEd$SXS*Ivt* zSRpaD7j==v+b-D5qf=eEZSFuLX$1+BxTK{DI|mKO&lU<2DRA0a++F>l*2xkV%3wLn zqkh`-*oLcB(ywDEXlB1iTf>rJIp{b26V}MJQ2KQ6(SUBOWekensxWsUiACf3=aBnL_O)i#qGMo}s`k?j zKvwCz%*q{z#&Yya<frsg1U+AOu<6Z>$asK z^88WxAuk}GMEz~xsjp#Wih@PH-?bOX_9p6D5=8%1HwP=3+zPMa3Ko-g-abz>q+o&S z({V){6Y)jIFXBv~oi#;kYj=^}Am%DXBUxK4nT>xRLzr_7=^AwhB8MZ##H=*38lkv# z;Cg(l8NmTnw#E@N^?v1xdS0Ek^$$d?YK#S|3zvJ6ArL!+AH-#r5&#&3pqhul{Q>P4cU%NKP=QW4bjNj_ikR?y> zjThq}7BEi5BxCJ#CBfH>y&_qKJ#LA+3iXp&3Hn+->N_3-u*7^9>~@sG^Hryz<=>t> zT|#%t``$mE?qaLlaxzPFFPOIYiRiGo=@|Q6<%G#kOm4D1y*pR8xhaJq3u#&r#&5oZIP`< 
zvvkoRTOM@azR+>wd&z571=E66>xRDdXCt7nlzTqSo2l;Pvmg*;QtW$_ReIG}%~v8p zA}yqf8ePkQ)0)gj%v!_dgQAi03IWBo@tv1qAhZ*0lh&T{?=H6RCC`z?$eCBW>>Z%E zwBe;?ySOiK-0s>`8T;)%*|PMOo1MFkkH*Vy;qranQ9_v1o1J*`!8pIwT%FLig)Y3m z=t~nTfx-y0PX;<_N-cAyHHKW+m|P9o-NP3MA*2Y0A_AmEw=QjMAFnis>SUMeTk|0D z;(wsiTwjjJX}z+L^qI`pO){TD6(ucBT?)3gpMuctB$kz3A#EvgG%e%u#U(bZu*>xh z1+hVq3Dyhv6c__z%!3BJmS=9g^tTljUWA@FYYaPVXRK-~{gvDV{KItBO6)BJCcw$G zhLe9Cy}3h9GGTmvF?3W<%%d?&!1Gp3eXk^VXVHDox_EH6)Uex(Q`FwtRI`v|l0Rni zy7ObEaK!;ZUdZRiM9s>2T@&bKvNCtrJ$R@)vL}Ius|RaLcI&L6*1Vzp48Cx29tCNbH^49lwp`Z3yJTepd?JhEmj3@;c--pW)?E6CXvExE4<`y#4v zP;TI6`6KRyOzOf`{`TIKmpJ~`xaXUn7v$8OlDM_*QZP7v57H``&?pZPGAGP%u0_W; z(DTKc%EC%+RYVv3)JL{_0vo(A8HMyF)5Rs3I{D!C)^QrD`bh=_pG1!}re649Ul4x* zTp>(=il_fA*7={6FGU%`IQt57O-$5Q$<;%~V+Al{3tlZoWR>Kbv(#_I5Zgm)tU(SI zg|0Frp9CDQ#MjxQEc3EQ@uhf*86O&`GI6=WF4*4Ax_Qhh65Ghs)**GI5<6ajT%C{d z|H|KY*n zBb@pZ6LJMc;N#i7P`83Q+g)E1otimy&Ai=(+g_Y1m11xPj<{V8T;Y%QE2nT>)w*MZ z!=YeqGE~*BkCX4ct)#S2hsSh?c=iX=92UtYz-r%K(&0o+}zmHaWIyY9(};6q#2u-SDSL}@nZUx7tJdDvwSuD z&~iEnT*)hG&@xM%=}?x?A&t?J`>ufmBWqzB9UDf$7ZVUX_-#4#amCndcHS?5PWsc) z48d-e>>zhMSVaBamym3xT8PPvTSX`L1AIb8aau3D1`?byd@3i5J<-ZrIftD%Y+r&y zt=ZWj4&0}j3_d7Zk*(zV+;OHwJA-Se<;gL%IzYaUo<~?!&`zZQE^o|;`mLOY&4^MQ zM(Vj&&w^{S$4=7rJ&HfQ?DFu?xRjwv`@FrsoYhAxa zkK9zL+{KJ~2C}$UbiI4aZzBLN?OiMwYy<$OfTg7f{J=GY7n_*PLxg;>AX>r%^Ict}`6jq67blMvm~ z$^7(kV1YbXCyfFYaeWH1?9yx;vkw2k_;yZv=%8R>bno{1m2|gba8^B`Ys(x) zFt}se0=OSg8vp50(~9bltXr4qCzjCdiA8ml07<9h&KGiu!=>$p507%;d4%3ve)%-` zR6w;MY3TEajqXW6UwefHIkuDI6#iTf1-g+ z(JJWOFMz<1IxZ_p>*vpMbXMC4V?mE|GE8(R zCYt4;LC*#OeYN9D?lMuvP+l$O`l5LVHNf4O4V7B_XtXOkkp`N3NKiF$hpq znEvZm=`%7i_B5yqR{@Xye%=JoPH#zJ7J{J-V=$V%e@gU!DKtI7YN$`UcSC$54^H~o z?m>gzW8$V^eX(Fd4+^L%()Co=t!hQtZSd?$aXnCERzst}$pDdgwRSbU`)G7(H#u~U zW8l*dJZ+a&AA}>wFUbAO+mL)-(Kz(|6zo6h&W_Nf?h+bT#D28JRZP-t z_@x~EoE`N%b)c;GGi*a(vQ2ENxcC>q!f8_VwM8QodQ#R~7x@GH&4%9PNy((@{}1u+ zWB%_NN6Q+zw=7|`e~fcMsgQ(svuNj#ylPogjKMWxRTF$)Eve=FC58_pyg7tnm$%4j z#Yg!*5Wk@n=lae&FRCl9#Bzgmz|3-51OfD&pCqMco0v_()+m!QOg%ggi||1heJ;ZZ zwM42_7bc8t#j+hD>jkx$D>GeGTsada0{irzTyU%$a(XKOIqYkNU3l=iDM3{J_)e6` zqIU@y6U$-R>}=i_hYwdYyT$e#S2dcCO@}jKn}hAfoojyq{B48L_Iq_< zW0WD0!^bA`>YirGa|Vj)6a`iMr;p2liv^sN6ETU9qkaAT(B9Oo)}YRfwvxDsg5&o5 zAEbehZ*n56#xdTI&ucuTX$ze*xI10$g~vR{H5q#xdl| z>(WxZb#EMCU zmkFu9q8H{1|1r71HUxwG26HzBJ%3+2I)_F@o$xA}*Y-zlxO; z%{y}UC8}+E(7&;9J@|6Zi>P~0EUzA~eZ$OGT|YfB-n*l0DaFC`7<3)I*jhTEKaWS_ z?Gk&ja4G z*SKBEWHW#QMvCiB?jWG@GY>}3|Zk2xd>0<@Rh1&34e@VJB^5Fe){^)eeG#pq=O2cbc#d6}7kq)QlDDKGj zo2tT%CZg-li0~=Xq%Etf97W}wwCEUo?ZRp>CC#mT6mFkeIbdpL(n3VE>MxYuIl%;b zX^NV9IrbkK@-5{1SpD>y{90J(PO?(W!_``>dt0q}hRx-Npfq45@0Fe%-C?b({AnLv zUrLlo*tQ~R7>wtrp)6pUs*l5yM{y~j1ad|?bOKNA)3N3>yWpp)V{E{1znvp?&OjO` z*wGqQTR77oX`uAv`2{_G$K!QV{Ks#OrS@#KXJp~ghZc0Oj^ZelwV_*q2{=w2Sc2D- z_`CNt);LeO(JDC^%a^>0I7+{-OL=RhX)oIRF=3~`L5-_BFmc&3z~8Uh{_<2gXIhe; z{zU78`Mfs;BzBj@Q^UBDoE(!a9QFoeZAKe)vXCQt6MQ}D{HD(JFvoh}?S!oeo1h2f zstpfGUj%#;6OwL-S?7skU z`pd|J(wM0cU`F~YWXHgR#(CZa%2*(BewnV*wNqTaBO0d+v$My(sO#y^L(AhAGT6C0 z7%YdZUE>Q({Xtx1eCVQW!LJ1ht_Cr!;E-xNbRf5n3$tQDLfAr5@LJfd>~H~cB5u3WLpjdii!29d94{BVJbHekbB%eGN{ zMc8O^%_yWlh(VsCVnlcayX4~v@3*#528t&2_$%-4AdR5Q5g)}W0w}7JXPHEE@Zmy@ zuIuRD{%4_<5`+G_CYboFtAKV-BwW$3OU6<_-d*=L=SF@FAY}ya=W;?Lcxs5 zJ@G@^0mkzi?L* z6F>%ny~RFE{t4w)rU{wS5nqm_gQ}wa zA^Fx&woE-T5~Keq`0_f3{eb;yVt7)=i~>o-6hM~u>Y#z%;iWzWK|m(;UN9(JtM&7{E1(H-vS8Bb4dglYV_X50TNUD z7Tc9mQh}evQe02VMdM(7UrH`uDPU2-*fWZsxoq3L-0e-G4~J{*(sr$Fa!Q|a>8jWc z=>%ieE=UXb;&4myLnS_TH_9GPt@XUG`J7-}(?)?G8T!3^Kx5&3#*J-4P0yvh56aA& z1JaTOT9_1w z4&E1hcM&Kk<&oP$Za*)aXwBT*zUivfTm`IpXuDdf#w@_Xw}GzRM*e6A(7teKUL%h? 
zf>@>C5pp1=$Oc>1p0hJLmgTq6y-xD-5oO9yaUS{?+%hJ<)JVIfS6Q7=uEt%F5G(IW z$R~(hyYLO)0=I(A1(``pWVOzS#h#%Zy(z2Z2l&*z3msiTs89v?kjB87=EG+(%1lhk*CvuI9$P(Ni zp33_u!PaG(HReLB9XyTXVt2k?$vB^G>_sc;$@!`>e^ysS2xgu&gEJQK@By) z1uM+Y0Ky%wxD9n>j3MmXT8!#TOjjCW6{vo^%z&vHu|zOIBP{C{&BG@2MW$-nT}@By!+3d z=l{Lz-=R0sGLGn$KfM#ab>~OmNuNuJZPB+&-|FH{CZAN${jtgRza#yh<;|{P@3bR3 z_hx@+n^U}(pC6}KHG15sc&hLA~fPCEQ`#R_0Uw~6%C!W_w)B^q)mh+N-BF@rv zxKM%iK$8C#pel2X2L6`@8n+&NPUXm$IE}IpEK2{7ZJBB^mHCeOgZg5*u2Nci^|I`e z=?9`mG0f$*v^ljUttq}CIbPeYAVXp5R?U@I1aD2~*)w1H4wKD1nu`0#6KQ)N_qTXU z>n8M*l9%)XD?h>1cy&&9YsrQj*=TcqjI-KWfmI1)m6BA~@P$$RO=%4jOH%(X|GB~Q zPk)C$V{w($BVLQod~g(pM_-N{QFv)r@4!5~HgMarx~3+Hv(gv5hq;FteS~MDf$Yxp5C7=vUv9+TGyk56I()qQd7aO^RCCTm-bP|`0-3(_ zZ9qwL3AUuRBf2LVd+O8q}P8oQ!;36exd_DQn~Z-dAmrX*2{y-46|e9%sa5Ktj)ENs33_(!$7-30r|xZ z>Ri6WpZD|H^>24UasM(Mw5GlOxwTLzOt(bWKac`14SOM~EVtbNGO(_N!fF~gSzs!r zB?xGX@_3~i#6_5+z`_fX{~WEX3{_IO*=iAI$GW-o3-AW3j&ay7q-w_c4kIQ3DO_)@ z4`3>>S@FWNt`I+pb$Sfn%QT%7{%Qj?EW|G|6|$@;QhdJp=pCmlM5)~X z$b~TzQujN$QWOpkQ>sia^06?xXHtxi&)bQ7>M?ixxG=YY1l=4{_7KnU%R_g}XNUZh zoq9iEE%Gq`5-dS16z$KJlJ(sZWaTOP{*ypkvDvUSVM{q)J^?N_#FM_T z$xG(?qsO^+eM4|{QQZ}*XMScY^n5oPgF)Q~zgo2=fjwhR5adV6r+9+OYpm)m$b1rrDNJ-F~SJrJYEa(v&lK@d3B64ASPy< z&j(^bi;2^i(g^Y_7;LL9wcwt~8j7~eCK~AVBhfu}(1Fl_8Bu7ev~wBpyyN!ia&Z~` z*o*?>DFS!Wx;9Gmm9Gtw5d^+kNS{A z!h~xLdnSn%%Ca_~Wv?SnV0dLQUZUm08(b`u7SlIWR{Ok0LqJB_N6Hcx-K^1H_r|fZ zP59<3ecb>iPFk{tzKv3;-03*RnfjKsB!Rp`r`Ts@r6teSe9}th*QRDveZoW`qu(W~ z>?)Uo4sAVU@EH&k=hCvb&))46$xnsvQHBH-HBM5&{M-l(GS)wl1~Zl^!C;0|dV{Z^ zK{ca=12~%Cjs2TWmsjP_x{5#zK#)gCuf|eIN=qS>;4dnE(Vtp$iBNlY`J}AIpQ?#;s~d1VM9<0HSF(s=d$f@}>Tl z^85u`Dq*vR^~!vx{Xdp$I@{j2T`rKa482KQir2^{1F;r(CYD)WfYGk zN+yCPNTKVEJNLZ0C{iISk_d~H^-BfmI8vUIyhK8exz*_clvm(t+xKnO^IQH@*1d)M z1z3%oKD=;|C1vizlsn18w~SzievQjoQsprp_CP|>osgN~LM*e~S_w8TiM2?oL2zLR z9voBbYqF)KqY}B$AYkHS*by1EnQ{h{HYObb`Q(w0i}`H3<`9>p^!}dL_ekbhx!MV0 z1_@>X!a12wurlo7ilXZwfDDI2&$1IntHKwUQ)M(<|?Wu-Vatz zUFsn#l^kuFR< zsN!Y3<`H1LMF{cza5Vrq4Ylg9gtN68vR;5BgpAqvY%#yO#alICHZWitKyn3LKPiT|ilYchAdg>SM}Z zeuKr#i5i7~(pC50DbBEL6!h=u|_&Osp#%o$3i(*SyH5o6F5^W)K(W_j=je zsIX)E758#<4CvY#)A=z;=sKN8>*A`J_R6~q_D0_xP5GoB);an75oNo{-+cdcgpXx)orU%l>b0VE)?r0ix&- zjrqLsLSl3aAD^v1A}ils=ndrh>lyMeGI!$&#UKew@Ac1k{_kaf(ppP9{!^U=r2R?K z>nlB>t2&;%Qx>uD&%j&}?E#!Hw%=J>_@RM?gwPzb$3GeC-Fr&-b7hyIu>#jAT3nV6 z8~t{9n1fs&6B84QcEsIlI;VByqtT`OZ>foTJbw79c+?`aO3-D2npyA<;H=}!mKr~QU%aMAqlvMXYHNA1kI7;XNbKvHDk&)BE5X=AeWQMaHvj8F;mnkFyr$_zV5 z_Cm+o*O1S#Q5m}h16t}d*{b;1_aa8-mfrCR*4}fd| z*2N`j<6HiVSL7s)CUB-$<=2)uAK@j;+-d=f;gixzz!e|EHPae75?J&qz-;52o?DM< zaJk9tjEpcRh;h00$Tgxm4ywl1pp zwZu7F0+|u%Myvt9FLSTx- zs7rAfHaA|tPVD#XmVUTxB*&MA9UQAI$=S?%mzx_Bm;Ve+Gzh__ccW0x77R|CteW0! 
zvV%Z;Gm@m+?CeNuwtOkX%wSI1Yn&&vfyo=$&B z|2s4n8psCyhPBQwfajf2@%x3pYgh^YZ`82x)g?QO<>I=|tF?jQ&S}-G{66sX|ffre&sz?GsFGxR<9A zBq~V`NeagZ<<5d>w;l~oeKWu1)31O|mk;vZ_}Sfa!RDa`31=r=k~%9_%NB9d`0Uuc zq{V|A&sz|c#WNpy0wMycx<3D~#k~E{F!At1X&bI?5v4J;EA*6R3lA6Ntzj}N6Wy0S za~D8!t$Qmd#HeHW=*?Wh1mBh6VLSw}r~krqQhExTlaH&P{c1D{zebk!4NjU|@=f)8 z!#K#XBWpSS7dQ^1JEnwZL5L3q9ZIL1RkKwWYgN~g~DFSt-T#CHfj*?hXa&V|}ej&QFX%e>}5ohT# zTaX{?Y!!=I>LM8^(d@s^<~i@|+(yg6Aq{AG_WIo;THP{=j+XIxAd6IdcY``aeVFsz z!o8q(84KdHqE{(~S)!#@sVj?0Jb#(6f6DypONapBWB0c$EV7gFV=>YcFa#=~=V)=L zq!{d6ch>3rmgOrc1nC}$FIjU$JoV!l$ zG@*e6|DVA*6YgGi%NHvw`$k~rOcFI5Pl}l~&aSCkPnYu!u!w6Id1+?l%6a1P4POAU zUW{GIwo$AYFU#PE7H{3(p+>$+(tk2v50SxI5C+F916`IZMrB<&o`yj`OIX0I%A&a} z@ZqP6Lbgm~Ll)C3pb8OBOsd5@Dp$WVSH5VcO)LDKQ4CSD`>mRQ#EitZdgq(x7UO); zYf6O>B40oYi%&s?Bbm7D-!cLklLe)0A6g99)LmrU%D6EwYTGFa=XcNDl4VYDDON;| zf8X?sl-JeO6O${TypOF1UWaf7)+hyCq@C@NIQV&u+wgG)Mj>O0tr!Btq5OU@ul58J z;uV~En;mM$ESunpc(@C42=}aedR;Lz1BwC*LIiA*?hm>hN5*hg5f<+)dA}-FX!m*_ z$B$8tHfq_21h`a5L3}X>QTwMh$4D3T{AX?;+TYxbo2^*2S<1C6!e4DEfn%qMes z{vPU!Q9J2eXn&xdV;QQb<7Jx3OQekFHm%L->V}Xh}WNJxGCyf za?>uvs9nONzZ52#SF1Q4m2Ffm&C!oTeJ*W@8HR^LWsg2RL|Yll+b4l8RgqxU_AFm1o)ZKhw1IO1qY7KO5?1umFE%=DTiI>TW@F#R!p_;Z`kj)vn zQ9ZW&ZI-(MuK@d|GhMo_-xf)tHHZi#%wxj$!AbfASwC`vllFRa@qGVf&=8Y!lwA_8Cv_GO4kHfK1SOPa>Ax zy$qcwb8|rJyPe1D?vD2&;=8h-QQ`4lfcJ8a;TDcLvAU7TSFJLZVkRznI1e?l)aK~r z2Z|9-fTwb%k!26^MH(HdJU2O7Ybt~bE8IMsU|~Ca!~EQnO$D#?1tO0N?zXfivyqw&`ubK_xJL zAn#*?e_q3auZyd*^=V>#?lT;*sqvLq5gB6Ukoy67QbAnC(z%wzKeZ zzV6FfbOFC;6XYOC7sT60`e8Vno%?K8suO|DOP2AgB{GIlJ*4^N@Ei zvwOF&Dc%zZYwvnpB1kndExkH>x;sVXOR~Isi7V;$?HmK;vJr6=?h2*h#bhUw0Z>Nl zSo(?36P=OX@%Hkt>-?au=IFa}3ia-OQvQT_)259nhAdLy6IEir8_^vpV&R@spfMH0 za(yRD1h)Cp&hwl{aaavmFgq+!AnlG@QImL-60Y%+=Myba=X00;#9#lMr1_KK)(yL) zKbe&&{)dLMnQeE>=y_Q8#NooXKezoynx2n4x6-KDB>4rrbX&2Xx?ooyoQ`1p93vZ= z9I@4znzmrzmF#?2^&J>Qqx6qP)GN()+a&S{C@>{&ckl>6^BbHOm&Gf(Ka|!+yMV46 zA1~-$8hW@VyOdAh%H(i60zAf|z_Ls%Gh7AT-8u6+i*2+NbCSO_L4K)k2XLid$X=># z;(O8`xkF7^*}N>*Dm6w{_)btubC4e*-{khk>nf=Ed5XTr*wDjbIG)60Hh23eb)EZj zWjevcXP|MiquKUkr*9pQe-p!534Cih;+Pko)oV>jzM9%sN=2O%X6(;1O~?It7lNoF zwQI@hZtLVzMYg&71HNuP)P*oe`v^qo{oZN%bD!9Ox+9rX!6VmcL9w$~sKxl0XSz7y zX83}IpSx~tr;vu`gL&R8?FRch`Z(UKtUY6+5%-)*&3VK7ZPay%Bo6BGB@D~;_Cu#i z^$)jB{et_0{ZiZ|a`F5!g8!}{m zv0iqV)bau6Dz+~qySQg6m^O7!VtDv+q0PmjiZ^qW7Vj-w_>*;z-6Y@&q5m*emHv!W zknY}KwyRCsx??Fx!ZhnR+M<^8l>EJymW$H%^#kg~h1x3V?5X*ZZO9TiBDDE*B69QK zwWoeCtchgi6{eWX*E$aY^+Xf4!2|u)=)34-?ki`|40ei(fGa<|CLU> z=}5W7HKBjjw*t}X{Gy#=yzh!B_*(70)2?+7{9y8H%kaQH?~P@2X^B}jN38X76v^Pj zpQhShl63#Mlk+0>Pm0p%`PKursqNqGC|mvyc9cw3w8DGFdWe*Sgmw^r-dgb%KFYAE zd9{Mg96sXiV>Pni+mBe8!tK&l{sr~Fl=%~>|Kr^#8k%XKY!4=UCk2V9AG!)-m;WLE zHIr#vULe2SWhFNkE-M1Jd9W5pDtyO(_g|v+&yD{bBfhG9_4(C&eDvEfU0ljwEd{aq z`str>{A=ld1BL$@0e@}%@7Q|!E5v^>_wSf{{~`&0Y5wovTYmxgFJ}HN^M|&#C9Xxg zW^6{(GbPkF=nsl?hd^joxu&d(PaZ6h^JJo^ zZY82SBjs8!uz3?wX0t8kWQg}!naufTzneMRsc9M{tCmdhVb**XYiX6ZAH0D1({gJW z^PESXc>p*g2PcnW|TE4^8 zmz3W*ytZ(heChwY#rbL5(r0bN3OiZ^Nug&UlS3n!$8kOeg_ANwVn>7Yryw99lD1>1QlcY`)lrW)kH3(FZdiUYy33yIyhFfeNx$wKBe|_+Rjvhm#Ax5 z@SPd_U`?@YX`t$>74!I_3rCBonb8+PG?4#oG3;bl!4JM~{7uSK9zH)%(_W&>o9OVI z%`Y%+T>4csvLL81`AS;Dwl0=_$&oZbyu@AJ^x+p^k8dDlY1PZVd-Z(n7kAx*9NgQs z7Jd&c!4ngsx8)C~JnHC?vdQmlAhQM>y!9+^nMTzWLv9o7&bPap_(P%MHFPS)=xc*M zb!L(+jrDgwd>G5MAGanNfsOY2%idbOf-J_Abw=#oQz>t~ppLlBy`9Y!Q|y07+|dHK z-rdAFYFDuy56Shh;e+@!P5Fw)(vFE=YtF4MzHeeBnt&sWo8CPw6aY_9as$5Mjvmy{MQ(LCb!uNtn^X2!slvZYA- z1ZW7(0J!;)TxiuOviaPeUH4UVEmKF%)~<%BoRczqPJM+Mh6rB=&hrSVbil;5Rpq3% z7&L0cQ7Rhr6N=SE7BL z992=*>5KV}ncrL4q6xT%tm=t12NgI_me+MG*FuO!wM1_@1_^%YmiRv6v%$sP%ohDv 
z$lBr!xir%_&Y8JN%L~%smfCfuo;lI=CkaW(8?)DAVk&k-)SGfL1naw2auFpD?`RA3 zMA@O1Ag3z92+7Edm9W_SBGN-OkX^F#@IKPjYEHu_eg=ro=uEJyJQCz*l`3*3XYGtC z%D8XZI+d`N;sU1fh{;+KDLrv_#r1mbxt2FANdP}+bRHJ+1va2EO&KW{j$RP_MncTF zM+~}0>XUKj=PBNuYi>0oT)zMt{63@*xs|Uk20k4@#T3$5>|#zd!CQF*{5$=Q>^S)j zqARkp*$t5knMCG}1rvbsWkVt!{JbS>o0-DD0BRjnE43#R9kr(vo7~`_&?@E+4Lbdb zE4;etx>!8K07><`bopYnMPgI55dCDbSGv=SwudQ>LPpbLmB;35?a4}_fpu5e{6IEC zStm1wue{189BRbp8{p9^2^`qikRBhFIoDbmq4w<%>fZ3H2CMhE!AFGa4SBnn0u__u zg*NWRu|ewk4(NLHr7i^6ue~}Y!N}-qL)QBwiD(fAxDKnBX@$w%W>rNewLO z9%s|DRF?7W`vtc{`ioFA>k8E#plxEjy@g^z zicVh;&8;s7nf6_Ug^TW6c?GkZ+=544K2yLk_7S9s%~%Y0BM|;*;CE|SIgn}lhSHUoaAdye!D%wb-7fQEcr1<1I|VB~XKc#1D`2##b_!8yQ%Myq)1=fk&Uz`_jd{aALE$GookKlsvxW4nI~#IZ74xbV4%7*i@LcrZ66JD( zyl8MpNX8Aq(BSXt@YMJipe4|$q^_YI>g)NkVRU1l)a(C>i7Nyq?WJ;lFhCT; zmChz%v5g#y>s(x8*J6ZQapI7RlvoHHcO3IUSthIcu(>{JT#J8ll;A$ptgC3cuTM)xX|W0qXPGRl zh3*&DGxN}k4mmkoyxnP8n~sXIo2*^M2~1mSc~~$~TbmdCEg}W9B%Y z)vzRndYkmX6a;KYQuM=L4I1;Q6<38@Yu>EBq9tB|db3#sWF zL(7nhlqm+9gDKxsRAV@cvDIcKhsb{9eCYi=;>uTpyO%Aypj`5XoEF?Lo}7DLd+jzX>8|_fzM3Ch(Kdg?VA=acC0VTDB~R@fBlKY_p{(jNWsGy=T#oKsgCFdF0Zx>xqnn?x>nw`h=AJ3~vWW|Z3T~!fBlWvN zA+Y$TfTDl_6jkyo9xA-=V>;?+nP@9WIs)_FaJquvLmL`9zq>0|Ir^YKV2SXR~j>+*xhir6e;-?e+yTVC+NxkkG=i5QPYrIRcgUPEO zncQM7<<#2KUDrgC|kwAl#3ht5+EYKna5~R38WkVqp zC=w{aiiRW*iUbJ-THHOjv^c?v1gAaO`}_az`_6xR?)dM%=Z#6G3Qzd@4Mz) zbI#|P%c4@~g)Fi7%)-W_HI?jJadI4{Z~(mSv$ce5J7Jl)9}znbRFD!jHW{PB*JRRO zgg$?BBfmwW?kC;td?Fbh*Hg5|-r@017~7|;%>W|){m_ECZCS>6qy(~zZ;?ohH4u$?<-hE_sxf2 zXs4r7smy$yKG3kT7Q|Zxzn$kXQX9JD`TH1F*iB0Zyi)Zn&t{^I;{*4*Ukd-7!PCG- zZ7=r%;_l-I?2${(SSQ|8^?v zcK@jt_Ucdd|L+7(vCOzUlo5YJo)j7*#0g3;-iP!7K*`*qwIu^0vR-h5Dx;K?ir^Q{ z;|pd;2FJ-Mbu{&43F&n6IWX##4DG`L8S4Z{H(&f}jz_=x81tJMThRExuIb71iAou; zLX)OMbjtY}3tnd~fAl#iR8UVdeLX|bxa!eQIxbtQCidOF+MjeSH`q2+>GS_jjX(4_ zZrkNd!%eGjbU`5cLyFkR0xJ+d5tatA-#Vm#3deLvLSzs+<0caGTeS9YIQ4)py)e*GvI!FR0YBE>I{#%muO?4p>asovVjBH7W zNrS+~y*Hq)!@LtC`wo+4a&b{CuC*_seQ<2&^aMR)z|0kU-`a_%4MOk77|w-6h`h@Q z-x)d->&92&<$a-I50+SjXH?WGH1Y^k;sY`6yra~BDK+O3a_<0iP*MnQ_x&mShX=8S z*-{XKwqwJ?zDaTh!DJ)5_<;&y@M6+(EeXVBm)U)%wRm{gBHuEmkbH|@4#P`5nDpN{ zO`S1zyuxIQoL9J#F5A|cKa8-tL2h*C&`xjDbIbG>=7)pKB5NU55;~h##)DGA63Pxj zVL+HN3dXI+(H!2SJ1x}x#SZsPdFL|DW4=X9RBb;RVs%ALELNs&|=Mz;1`K#Tf=cP=XlieSbk>A>U`2JO$j9Vu`ws#b^fACLp`se31 zuiRY{!WsmcQnEHVaA|$_e;1>Sz0Y6f&rfR|$%5B(TS?xrc$#WjF<+H1gs{EhDUnUo zj~<8ROxq~j>=52Y(8DcfoIb^veDWQMvMmI!hpRl-vJ<_^fuG5YC~nZ1QOnWsdv|f- zOg+g;Ff%{pCmlgnX-Vg$rJdi-_XXz4HDpFRJe79bQ5g=40*?iqb@|qGDN!4}reV`E zR30}n=d_{WH7Rosq{iLA+vU2AQRu-PTgXgHJR<~{Xrx%DO)jzHc|IPM-%MkK z7~6j7@qI-$x3h2~PQVvv;F)+!UZL>|1MhjehTodEE>+*o^%5apR@t(5WFc8*u%#l& zZt~*UMLGHAIZ}Sw=`&*^A&tf>j_cK450nf}r_bd11b*zZNiVCfHMQd9*!M)@aFbi; z%9HVM&t%TrTA+P-?Nt^YCv~#*AH<+_kw>r@b4I+b!{!4r51rJ0FlQr8-ljbl z^rnfnrcGOmuDPwZ%SzIrl5&1L5D)6O<{$mkpjvD8XR>PG-44r*%kiL9m8t36b^4Br zL+WOGf&iSWv3Dqfln ziDsd%9TosZ0EaT)Bs6T*@v&V6xVQom6{)CM;S2`^tXm~q3~V0g4`r->dvu312T2?c zwGn%#!jwvsP-`8kQfM^0G%NqfmKH5)0RYR$R|DXz(jF$wc?X{7mfLci^lPEC9!Ypq zFdvA0SL0zwv|WW}oCu72Q^IS0#hc~Ypj zj3;YuPDK)RSSN8RiNk6NCRvs<$)$f{@25+xeia3Y#rzn2@ z)eNEkG#6JeSL+y8t)yu1o{nF50T1~8U0k>u2$|rFZg3(t@h()=IHFed{00K}?y(?6 zLO61AZ2auz)HG|8R&Z&fPmuN!b~Y9TFd{@=h(iC+#3s0(oc{jbh4%7%<8u$;1b2X9#n$D*F>bebSMO50vD3Vd56|aW$jk zHid=GT2;5x@9rXz=&_=Vs-aTDA!`eT*TeFB!)#>XGy^>@^)-RrH0DopVIT;rDzH5b zWmLSg09>1Q=c(vDw;%$St4_Kug7WP`Zy>U4mxwxPK#mHn1--Y$m9s7dgn|Zc4c&t@ zmJYOvwJQn~fQ3IktIv}oK}^}Xv$H?SL$n-pCwixBpEB7*M-wpDi49t3=23#KQf4W` zk5q2T>I8ndyH;_}0Qy;(l4iE%!yl*IW^i3KJ60~QCr6p~>c4#pg?RoJH<{c&YS9K? 
z?TlJ`6(o#`Qqe`$rsLELpz?37P&N`rH&;TPyet=lu(M<0Rl|_&j4Y6m1BsIEF{7Rx zYhc;vj&Xi@;QcCY+q}LLJ+`7^A(ch!idyZjbDe%0;~q=LcB?=6*!yay*uI#BF93!;6G6FbV3DZw!a0~%{gzu@~; zcA7Ct2ln~yKo~=IRToZ#vL{YyJP#`@KaK%@)LRDx()xQwPOhmR;)1F26GF5D`W|9+ zPwwhvyI4HuoV9bXioB`V1s#u8Ay?@bFQBI)OJ5N{k;S`u*!@}a7Llpv!yT@t&2Y(7 zj@rpb>!x$h&=HF(OMpun)RDAX?+m}nqEqljI=D1bUW1^!k>qP9zXxu~W~87%HFBrp zItxwd$JRp0L;5uv$eK9qx7Iw$-BxmeDouWY3A-dL*74YHlmpKTr^Ru4U8)Mbt76$; z<+fKlszPBds*QPX=s)#avMO5%MReNuEQ57(8tya|QGKHYFAZ)D4jYzB=Yw_gu>xdh z2&0cLBk#en%=V$z;Lho^8OVBGVriT3$HVa&4p~(EgCU*@OfnRk`B`3NChowmApZlQ zf(CL8Wbkj<2FX$TH&iD14BFALQSO0`rOSl~BOPVso!cz@B|Vni_ovoxYVkT)spt-M zV%IJ5@nyWJh%X4d2QDxUKx>=I$!l{ZR1o=3=yW8S1PR4E?1`#iRJZH!y->bdRZtnjYHl%sfC z(7g}APIf2eBi9dg7Z{`9;s74NAsyz#fW^(&ba``E9-a4<5LKYEGJtg&n>8s+a4YhD z)O>RJk)Xyaqk!MjPpo(;{4f#ifT|P@OeR|~P`Tex{?so|3$!inOf1Um?na_%zL+k; zF$WA4CK4jc*1cMx>~|{lUKzL8L;0<%Qd4VA_Vfjq`^q_dvtp(s!iSE--(c|wwenF_ z8rC-dT`~98R$kYKf8R3y^ZIX{NKS<=A#EFKC@;@`U3dtMvq6!)R;qX^2aPHSCmlEQ z|M-rp(angUkvJmjj*({nbzTKwoK1E%It!v~s}02!HKv@IVC<9^%4;h`SZSx0P4=~; z$AY)3G$5C4g+!&J;($xh@HM(U+KS&A%&pcP8)l+9}tyw04Em^gt2a8OieH0D`|`P)=3c;nYf zIn)58NVp2d>o`%MazglQnr$zmHF?(CizJsBq55_cw3`+#D$XIHNvn1m2bu|Iw2pG? z&cTb;yia$IjCNOA-gVw>u>fEYjs~A5j&!k`{_3JdZ})SXXU5bQaMm@REp56B#1NV* z-7#|SfK|=8WzD6iO7|^Bd1{;trIEX^Ht!Rotk#`AraL=bXyvVPv9WJ@YO_ui z;;8X68=^w2-mQ$`GplHAg&k4GH%4NtaflC`Pq-3VMe(Mhk`U#RMzY6Azhi#d?l}`K z;b2pnqGap5s(v|T7^2x_c#h)3NDDbtKak4IzDw5}_V$HDW_CyWlg;C+97>$#-w)zb ztnRG-$=p;A0LdVc8P_iVN#I2PJ1%0mPO;9#1P;h2%H)ytI3ewcE9&j&8_6Pi zI$6Zl4=(?>`?wUkPxPWp1Znsy?n|13V5=xkpDV10n1=Y-fia3j6p|L?ENZMiQ=}mY zHf)dzT78Qcm;;Zl`u0Cc*m?JPI)Ia-@uV$#IN-&=_lJcdD2UHezbYB zY5U%fcCK2fADUaP$<;@hgWvXM6eAbhP7+Q(_?O_*DXR8+pBjB7#pfR3`wJ3JN#+|Y zw7RM!Gt5>w8%VlDjFfqQce(y2on8iI<0u-6rG&+0Gm`H6oA8GasE(cceH}lt4z<*V zsza8f>)Ko=&R!kITs*lSg0EY@IeE`&dHV$Rlg>?nN|-*pEWTyFchzj|C*6mB+G6XT zUsG~P72i4dLHJ2m8Q}RX1k*$HIcePfr!GZA#gEro$B8GOADob$*9|NlR{!|6HW2zF zLy0`S$JFz0s{X0})R})R_f3T^+hO2OI%vp*-amCU{8Rn^*GrwvemWB@5nSZVT~2Tm z^s)66bEO(WS73EJ99DA`>-T3qWQR`l85$K&B{qlGja-f^nvMFQsimTrKJ{ssVc8_S zdG9-Uf?$=Y1t5427fu>ehTiJ?~_Su6Ye|yl-3%99T#X#9M^CSO0$A*-YhW|solomcEqb^IgWq`i*W2$ zrB$ra_up>te#E`Bm0=-AcW)lBvD+b+6>my8P)DEqq=QTx=TFP-$qm&C^+TRW z&3Q5ZxcbLl1hKSi={4*6{AwoAA8WN5q_zUm8NxT@+467eQYe)Dw zP+Wi)YH*G;VvGwcf{apHHACY@I?vQas7yfQZhQ1Nv?r5(Ca$L$i97nE5P5f505TRU z0;g&@b~%^b)9Fj%bSgsFhH~J)WfeijQvB}!c07(Ge%F%vX7eN|-dv};Yvh88{dJe5 zV{fYYL)E21b(*Ke5JfWy!$41xyn;aeTJiU(3&KLlqA+bS&whDam4B?o9jbOQ?04aW z^QeT&;9PfEOMnEf`VhR=u#b0X`Mt4ZdQZQXcks7pdroJ5@c zWJ_%MLoSphC@@<5R9Rgr!R1d9r?VH5&?(VujvqZhHw^sI5hnr7X^cvlZqMH95K_We zBpz!wYiVefNVH8SGu5_lRysG68zP;%c8Y>v{LNZf#q!xOvQpJZ`+li&?bEhrL_m*E zVNBcfnSf%5^jXVUPkh_&$EuBcAl`{9DMhu)K8AfWd7O@z&&G0c#>!>DME95z-oYL+f^em;bTa^kqS;#Yp zZECvI-0(0uYf!%zT%PBcGRkPAc}^>OfBtg!q-$L_l6Ym_#t6|%e9kK3YnK+ip0M@^ zPkm%tUJWT3eHNE_ogiciXvv)x%qj78^v|xb1{?O)+#H!zt)9`Bxfkl|6?v@++Dt;< zOD+B}RPmE8EA`_DYf|W*Q`pz=m8gEf&Ny*T1iI$pnV3tlBl0L@ezR7W;OdsVJ@0J~ zzOPYLcfhI|+CzCY3@?nk8#W_-&Wk`PN`hInzHU{|A`EN;4w6W&Jvn09PzmH{gE?gU zZt`l{ri*ThKVi6QBK-UGA4AC1XI1r z{(Ke_g_boe(3#U?jr&Q_^DL1XPU*J!dYx$gDCpcMHDJbA_E{l5rx1$*2${shzKu!t ztRXH72g8T=zEU}!7FLr~j@-n(bTHYE zim`NJni7IrCOpT-uJD2^Qt`Pz|Di0t4;~~ zM3s{C%5;Ae)uRfE^V;Exrl%1d zZM$ZbSu7OhlUnzF(joVuDF2=DYH$8~Q7UzU-#iebkGU?03QvDq3B>2#yNU64;Cw0f ztbY1Nq*xlEX7EB7r19AG24PDpVY}dmXBbPol~`1pM~7^&&=$L(>0KV0y%)DkJG_;$ z2@e>827Y=pXm>Uy;Qe70SBh$O5{&u1t*3XO>mNzg+~pPRloaN;&K=zpcN)=ZKGW`I zHnSIuMG7wHiBc!S%UO)`PJ|ckr8>-bi6&0c18d=xxT%HKI5h@5q&B!F{Ovxhj{6%+ zdHLN~xSi^tNV?~^w^pf{ng9CLFL9m= z!JCQ(?*}sVJh&hl01T!+_yy@7ZpV2mc7O}>dw!=OPxm3-Gjx%vezPs5eTL91hAmsd)H}luk 
z--@aH#=QYwuqhTS9OY6g#jfvqkJ++l^4jb=&w~J>=pxkws!6tah3612^HO$Vk*Z?UmzVydwu4Ld5Q!i(T#rXc-!t#_lS)K1lS#=b0S!I2HGlQSk!Kq&X=K?y3{zfF$=_r$l#EFq$7fO z=k>ZhMIly8o}X_zh25J~AElvfJ?B-k1baP(Z$(b8ae=#POK|{=E$Q$|5HO6aBcJzb zTD^_Mr*p&%nbEuaLIE!H0UKeH9|p3qTnjL0+Y!P(reSL8(&NI=fm=6iEs&+mXelUW4x@XjZ?0bIGhv3eY&x4(Nw$3u(B zqE0h&O-F}bBru`3>7vp&CnNUc>XUP~#7d=Zx8R+Nz^*YI#ay*^;B}~iLb-7v*M78? z8t<|r$}rsy#`}J7$-JdrwJ}6a;?q=`*BQ&<$MhxB^enu7PeMPv)!9&YFZrZ7knnkg z@LeQ)aKK{N9T&XAN5`;t^P2$Bj$mgi0lx1oHddm$$P+xz=j~)T>Ch^?zT<|6<9VIj z#g4KAuAik-^U&5DPcom)C6%k#x**Y2jqn*|x$LCIp4qgf@zB*?gqBq&91*&|pRQ}WXAotD;D#tLsmRxCHAxn;?cLcp zcgd|iFgS1EvO>C^KG`XCOBm%djRn!Et=}WoR^NzjlVWk?jo^X1mhd?2r!ap=Vb3dW zC7aP&6~kRS|A=_N_3S~X#|_d@tsL|)S z6%JttE=3f>1CpLo(R=hBVb+{&GKesusr?T=;?uH?B0i(WqDGEtTfR8*6m`^FzEQgbo;E0+!tkyh>qoq-^Ft6)EIZby2Vm!=7dGpHqwFK(|wLNMf$}$iB z=jZVsfBkRW01_7I)_2Ffg*{$UhRwtr$NFE&I5G&X<8_+XiX4PPjl9@&o1aeBVZD}yKC zN2Z0M#UM%o9nS2bOk^{vCKgqLF0NtII(kvD44A>@+9|uxaDB^V5XAwkx?7K2%#CK4 zeR+$Op2=`&oWoN~Wo1ij*T}o?MG?%VmopN*jhp!8nHte~EH<=lbJ_nct1sZNTV|%L zfI*GWYNW843+?vw4=PZ+U?E<)8hCPjUSO@M5 z?klwv5?f6kG?Y;eJ&x|*&@^V>U`|OSPpn~GaCs4B-LXaJa}p3Gzs!2!HCDwKTXMw> zbnB=YE*zI)ct6jJ^%F zeP2kg7)4~pKTj0*UJ&Lg#+R-Cq+=|8|2?bEroO*?X!^>sUOSCs3Sn?_y&9I!76gGQ z@=M_W&usOHGDZH&j8}9jM2jtOpU}u<4e6tPi8vk{_OYeAT<25@)5nKKB5u%{O)v;g zHlP($G~gRouZ+K*9Xy~9Q{u;02uAYGx+LldkbZm@M^nb&3_g0M3eW4OaC_D2Z^X!` zk@eC7X*M~H1-(w&aGcSN+;)sqtJ{Pd>bfNA`$Gdrd+kY)J=ka^;vVqIr9|^)$sT@M zm67cVhx>ME&qr~s-2O1KZm9)43lpIvr^c&KF17Wh-2~1_4DcjD?-m}Aem2fZkl8ol zu`3R>R??)s%okB(wnvMwKOO)U_dV|sE1PsuFH3NRmpEkW0J}Y68KHZp{4B*2$K@IK z&$~amR)hma4Zo)(w}tf5AXs5CBzq1 z=iHfZMGKr=t-JYB|EM1RtSCFWu)rzBT$P!hPR zz7{#6S>v`l`X)w6Xe(v=Nnm$r3D#Vv8?6iT9B!l^a4SGgKURc2uCflr?TKez!-`s{ zR8M5qTHeWt5|!48jRn*gHM$G*d20Ahn^CkG2m*jCGT_U=EF$SAow9Y_D^A(d*xAtV z=(t!A^`N8u!}GO3{jR0e9odyGWaNbN_V-=wxX6{$;S)$`NUO4tU!uBOZP(|68 z7n;vj;(czBz+yKFv_!0U4g>H_RZ4^D&)oTf6ei`;hZW+bYu$J@pL~S0U6wYG2vo&| zysqn3-hLU9u9qJgqk7+AjJis8=fSE;9;M7oC|2qD2a-{pp0!+*YIh!S-Eh^96s=|> zFXALr1ecGs?TRzgFnK%gnmv1Zm%8gL{g!k1!(jqauVUS-OS|hAhdx+{WE7@-(N-KISvV zN3NJ-4kcG)^qS&PY-R40S5^2UQ}kUGg(3!?_eKSN4UYO97o~&C63o@>x{-8k1In4} zIkd96OJZ<&>nv}m5^LazCCt4{_EwWU>*6E-N8Dq}=Jr_Kjlek-9kJR6;x4v37^}Cl z^Ec>kf*y>K_CP$)Y{hnGZ_MIFmxqlEk^B{R@KrrJk6M7yFIialm33FP;>0AzHiipy zr~O_XD0sR{5WFOTAit!Zk78}>k`@ve=;+Q~Eq@%J`$`a-GW3sw#!(T7_YsZGxw^NC zH(eTHNt#x%Jh^=3+{gy_-aHGeeNWL@q|Za`%vktay@xGMBBlPBYnw*J97)dxwZuMA zM$(E_>4*MdEj=VbYdgDG+kY?r!7!~d&5d}$2&nc^I}a9?`}`xnGLY+ucLz#Mb=_f)84*@T$SDvf@&u@d~aix&zXsxe?rI?{b|i=Gxl ztgrJEYEE+sV#Yt4|E^%4eGoET=XIj{@!D0#&NE+Y>h_@U&yo@nB*4CfqE1^S${y*Lw}0Lv;dp{(S2f4VzSxIYZLM(`E1Py8n8{nTB> zOQq`vP)X7&3W)~meJGX;V3DD<4dk|V;qzBC8p!sixvu4}803t3Z-;&=fcw2WFV2m% z&UizG@A6df#c0+EUF|Seu%Jin?&7%$l-(<8%R`&qnJyQVxAYE6+&V*MF$m^ekY_pL zG({x+Mv5}h;@Ca2z3ZkHZN3kftEf$B3Ir@O5_IiB1}xqwJ82WuT;+4StA0BbiMPpR=azO+Inh*bVOuO#C$i=IW@Lc@zY2$MERlUUJ1N^hI{fUd zGi?+E-c=^0>{^$(miiB~W~DTX#^plHBWw&~tC**caJ6mVE(!74E{LP3%EJg(gekd& zm9V@uyWb^UY<@R|qn+#&J%+1ds~Rr18Cr4X))D2GH>H1sYn|2|cwlglHWl_Q%x083 zFC~rxs=}0M13l*R*u@e~-X(2H$(1Rc@p(sL0sb;F ziP(vLwFSsfG)u)FCZV-6sD#~ezCu`q3uOLuPIiU-`TQ>gNG#~1q~k65*jg%E`HwhQ z%iBEU+MC?O0eLulJ<8;L>u?H*hdOLMR%_xsT#d*V19zcOZS4hC4{x+onj%MBL#nX^}p{jZULFl8^nf z?H(JIA3F3m`Ub^V#}y1ASykk#>)^OLb(ZO?1kcJ0#;pLmwl+O(?E;gVKcd`u{i&8= zVLDJ&$EG7?nUg_7^VE6NQVQGy7$9yv-0@NCH{$}YnTUAx9{)8QAn05UN2AKVZ@?pl;UDye&C5XR9;O#BWlzjKG!K4e$eMn73bpLy}Ra|+oHfa@HlTv6+3X4>R-$Blt%Jq++!a_mI7Jq zZ3CRaYc}j&@$9Co9jXZ{JZod_gwJzsaZ^WO7hvCjFuu#C=!j*SeO!$gn~xX$cA15n zSSxC$;EXicBXdOzR@z3DG%u(RdT^n^;4vZAmXA33%ZGX2Ts@MxA64~4CPP3;-4P>~ zJFko#S@I%su%X3Z6$apP&X|zdW*uE&-PBqVnnQLtmlxKfJ?xI(tzL&cU@&4U=y;Z& 
zo0ZuFv&*utin9x_DlKm43|@4ZiPn2gz!)2W`|C&Mth(LB4F?bgOj$gEuQh6xWtUHL zH>9d+faFX;!XOyC=aX^&) z`87{bjQg+*EAQ-f)>0V@EfO42^k7~Gp;4szBKj+H$@uS)I*N!u&>()^5Cp-Q6fq2DLv-zYTu$i5 zE3}EMhmL6*IT1xxQ{j1E7SEWB@H$y7XB%*CAu^YKWn#~Yzx_!U_bFw;<;RlOy`cRV z>x0w~yDPuU2A;b8FAcxf7-+wT^xwT!%QQ>$CBG_(tLgTP8}893TkY0R@^#chE=fkX zcW}d;R_8o{r2{;kt1S-i|0Vx7uI@~jmJLmPM7DFf>#kYv~3Xo|M_^fh;J>&hCP z@%W4{#(Birl@IdXt&^9_<3Nf>JT|bcEP<~0`Y>0aEKEj4B&XF*UeEJiP5$?Mjn@Kf zB`;f>?~0Zm-mosk8=uqNt4=zhRQpin&oaFps*b{N5ninlOsAjcqdrZqdXUXeF8nh8 z`l}`VYV1sG!k9ymv17iA3&PPdp8)k?UY7d4O2{Ro{^PU+No1 zLk87Nq7VFxp7s7p{k2~ud_lje!an=rU-I=kj|JFpr*3@vq2F23XJpO^y3SGMNG$ZS zDbwg?z4Iiq#d)BA++tXTUpc-y$?~9sDF5MK(*Mt|uCO|WyuQ)0NP+;lng1gZu6xt| z$!md@MIsr*$&4dItRlS6nZ?=W=Y~By8f72GZ0Mx7MDYpfXJZO*0SULq$KMAE3*YTbHS zcmG$D|98huvov2=XCC2qO8hGZ-qtZa?InQW1ymcDij+bv#)b`>X)$mF466RyTA}5N z9S1GR{-4+gfFmbR!LViHW+M$7@p$l8Q~upz0RPN)XI<%=d74Ba%=$`cr4TJc{ASI# z;r~0d0SykxMGNcdJPmOI@nFQXt(?EXkYJZ~N+FU+j%^{&1tWmrh#xvOKCA zIm%>PgpEEwF$V{&ZBkLcY;rQ**%NI}m z?`(~7EZ~R+26RW~ew$7hTPgCAd5^m>{}F*!q*8od{4bVG$JH5~H?09rG_MCb^!){C zc%zMUlfRWB*g#qH>^ANYexHxs6#E?ic|q>{B-4>KwN5V5!pOH$0t9F4}`;2PT&!P_?!R`A=-+5wI zUXNN`|8dsfu)|N+FOxPS|3* zx4{w_2Wcd`mtsC#Y(ibDp$utprl$3AA@iRLNnLa{hCyOf_xzj{MAODy;&m<4G>hK6 z@0#&*0ZN-%P=g^C4NCE5(NW`Lgm4uxp@aZ;3(?g!oJ}Rah^v>G1SpjZ8SvUX@gcX3$D7^Lbp(5U;(;uK((amqtjg|Gn_Y%kkv zUQSJeOs~uD3p_S>HO-B*b#*tSWy<2xCaajT1V3ExzLicoNVY048jxfgr?9e5kYP29 z`PH!Foc-e!ZptNBB#6Jub3r&4JZvu?EA2RBdt=S=_v9EtX>ad+Ra#o3pGH_$MC;#* zT*%52j%5kpDe@3pjJ~^&%);=LXj}n;!)er(u~R?kFsZW!u*zfs8(_qqX)Kkoxb+7O zb9gR=Ffg!mhI(G;{*8qswA{scF^C}rSB6^${53gF4r2!Wi*xSi zb6q|lyD$ft#R7>|9&@55FEi`KPNK=~KW6L4)!RAP9kcg}E7ZjbOhvuky4HG%D&B8)Y3;lW6tphZ=h^XA*O2EDgOJz zwTR?g;iTh67KNf|FRM%6p0Av*uuYbE4Dnd-=PR6mryxjvpC`Wpzm#37>n?3uxg=-i z$LFJa;)w|uuWcWH{UmYPid!nZKobMJE{X}xyVv939RJ6joPvAQN2yrFM$zFkhPoRc z&BeU^%_hpXwe;v^Ntzsf=uaz!Vq-PlAFNDWuZglZ@z>_nSQHyqT?fK<$6VoV66<1P z;?TWuPBo}*p}m}zB#%VfvdZmLeXrUBw=W;U1VwAet8d~|SDaN373r&CktPn`;)-{Q zl}DZ!3)9Zr`DbqdW_g9+I7bmy^!Oix>&9(-2HVBJiAj>}Zgn@KEur-*TF_X;ozGbZ zAfKUeqFKM8qZM9@+e1zZQFw7F>1G=CE@U>6JI1mlK!IT1E%;`bU(%>N2tinx56)t4udd?PrD z*A>t^Kp@h%;AR8X2{VO!*GkSW8M7ZeOPeUP`9z5ve`5CDp4r(?(W%U>f3S}%f&59w zkSRT{ydS>i!?nij1=WNakaZwxN}=DB!wKMFD5BTRO^(Zzt?GV>7M?JS&J$Luch`J7 zo#MCvI_7P@>*Divh6*`ji$&(Cn|G$gnw* z0SJgF9To^_(J-z?n(ET8lo0n_!&J0LG&jJ&@PZ!;epKx0uTe{j> zSlXMlr_QVLgZYf-|H>RjY(Dx)cOUo$GO^4-K+|AOUGs@r+~3R+;50h7 z@wx{xYXhPxwTbot%RGqTd9aGi)q9LlucRYSq~W}OMmc|}|5`2k916tGJ!V#pH%=s( zd2e(N6+LormlzR!r}7se-N0Xjbamb5{=Iz_y-v#Lx~blE=RwK(xj(s7H8-TlMw+2X zFr5wubb4}HwsCR33$J-6K@}spGeydHV9X`hgeWNQOq73nlbJ}?jZ@qM8!tu8vMY9H z4WP$`mnS*-i<|}tkiFxA1D)Lh{OWbtn5B%62-!zkj77^bLh!6R3*KTb(~Mn|9JvVeHb^nP8?cx z_5>Obiw6{sPYlM5O2#iY`EJ0xo#o?R4qj`HJ3zJK%S6F6AO-D6`xphP$eGSz$dE3c zKvZI!3otLcb0Xd@gd|Jn6ovc3Zo(WrrGT7}Th(FU^koN(6AfI zD7J9duE4^mj<4Ev$LWh%Cjwnfqm3h468uee?)BIoc6>VGfQ{lqHC2##%~sxt-09ca z-X|W|&lH^Kx@&BkU;-@%*r4Nh;d1jh^eTHHHMV}^wBF?ycW)h_mNUoT5IUkLyq-qP zr$|c|c*DF-$6y;=AbtzxTHoa4IHQV-C9NbufAWhKzWPb`a=$F$y=}z>a>q9^$#%Gr zWVBNu>{ms@iw+R-Ao+NrGURl8w|W>cpVK+inU2DcuyNLm9Pg^QBhf~x%2|Suxh=6+&qnFii$9B zuBx#zy*QbpMnI#LH+oA8jgg#)+_C;*?|=LjF?)6)SF zR&ykEnV-5tMU7UOHK{P1yLd=W>Wh0Q-@^3V!HfKtl;$ks9u7Nn>i&72ZL51wsaKRKQgzLM;lWrG6)UlcEihh6s71}}Aq z(Fl1dPEO~ejT#CN{Aq$DbD{Y(r30=>Y+qBcuz>i*>=N^@Ig2`qHaZ@G7@ud}=WcR& zPqIEX@VAp!sP!G^oEwIy?%uc}W#9VYdy(`ta%pFrz>c?!%vsfjhE(Ggsk{nP?44wP z45(;21XQEfJs7W7{K-jI{~q;sW>H59* z%|}C7KqWV7_U&Ij>9j#wS%Nc5MJG?mtC2++oSKAL;nbl#Tw8tk+7^G48$6{%Dx==R z$ZXoHm%GE(C2U-SIsGJ7f^cAPZB*gzvn@2h@5r~9NGtDj`m>(@|N469Yh+tg+&jfv z1?u+PyYMU~C3q8xai{sBK?!0svOkn*+Z-y+$b0Aj`atvZKjklp+etp1Okmi4xreMC 
z5`Ugw6rZkYL9vGaq&p;1!b8I$mKeRp@r;uus~;+&{}m$5kz_Wqwo5@`Q=@FPJr0jG zKFWMy`r_U^mv{@7!O@^Sw}41RFZryo$!QTe;rRl#y&wo=}H>C>ur#T&?Y+{AxGcsFr(Tdf2Fy)mgFmz@0TUsreDs zUfyyA(y&iUjc=HD35pTYKEFrWd3R%f*2zw5NM5JejD4ce%g7{>1Cd4eTzTqqiZHPu zs&I{ZNmDu|BXF9Py7i91MRwel5EVBjCSlO}Lv^qwJodPH=alQ2H@x4pWP7gG<%qhs=WVVe=ucK|?T+WB8Qg={u>5<(;Z%w!v(QU`o#O$K>_p+Hkk$ z>%!w(7mEtX(m-zt#?b_&Gru@(`#|K~W2vb-qu(1Nk}j}NuLF3kt&a$`LD$W>za$}4 z_ycu}Tl5?KdG?g`WnV~??Yw;Vl!X?4pu6BV3FbQ*2Ku!>)) zj*5qIIf{au_i>5rUAa~(K42mIyXJ@!Wv;*@xxLV`_9&l;!C{m|!JuV>eGn0>totSt zk3ft&you%cE?SYFZBMgq(7xE$xmV9uk7K@Es8O2azwJ1Ogd9#@HOBFPh3cZmiF?E zA(iE_?f4>>NPEhqNQ$@w#q(H)#FFeR25oc`rC{hhP(o5}~8R-J1eT{8r_T}E|=1bKz#P4W$vYIr;hpmi?!zx}Xs zYL%GaK#xZZ^lRuaU(xtv?`V{@+jovCH#}cZ-rxLD|L}7gpQ$ahC_D}7ua|Ig#@|&j zok*Jc796W&r-^@22D$QWb!S^r-D0tW@C+wjYo*QsO$SU9WaTP)(C;^FTT=p_2$$yHgrS4d);NRiFg+`#Z1 zkKKr}{TKx{{)*o3L;e9LXo5dH>fI_Q?4a%&^OSD z+p%wcGpVZfIBGMDFzbOTE|=3Fx#Eg4&kxDFMUGBZRi1`fXL^jGu61cH*&uv|&E=EQ zxm(_E8dizLwvQ-WFq9RRnR1m}doY;(Hh>Qd%#YGQ5TelQ_nj$ld@hlp+@c|#iavX0Tm_AlX|_(>}e%p+$B>9 zeSAhnz@Ftvi*Q`7jl*ZL3SG`A9PW<#dIhr@5Lr{Uh6xEjN{NJsHr}$;((#{8zV)K{ zKsucR?-~B-aR0?0Miu}uc4A^H6^ucjC`#2ZG%iJ)T$}|p7j8bUEl*3>lx@RuEdT`V zl-Hqz&tl%)9QuPz7i}y1jWs;xreQU*MU=of1zl|GW}?l|ILsdgc%QKCo6uv!S=L`9 z`GK(tPI13vbG393a3IBcPCh6N!zRvJeB}P*dJ~JZL3wglpuY|x368##tjg$K@_nm; zznm}bo2#c@a(>Mid7MaCNVICiZbvFABHQ2uyTC1pXQtQ;PWJE`&w0H^FY8;LB}uk^ z_eZ4oVFQIAoj&#e@kjO4hvq^KAL}fHvZCdXq9U3Rxw`pjR|FtY6TgSe?BSi|dbslh zCA}qBEIxHCC{r*>679Hq7}3kG+8&xRUzdkSf*IJSfvbhR-8ysCIJn#=l_g>&&k`gup3F)My5 zDyrtdsLm+E?Gwb!N~%=FB@-J)c1a_O$7Ko9n)mkSrM~d4oVEQxC?4N{TqBZ8gl$y7 ztEMB&2v$x9>pf?AeVn3x@+C01d_{yAvfaZpF7qufV7xtp!>>ayjP3o#y)@p@v)>D} zRY9ey1+Zh}IsYSfWS;}R5-gU5iL&)+4VAEIFA+wipTv`%Ng`!h$JEJb5RmtS4xD43wvy#Aft>S@L`-?R#m^jQs< zD;{ANhn90zjDoC?p)f|~n$3$7rZ+(;Vl}pi?+;l`}im0Dvb9%_5uP z1vD4J;3MK|Jcj^Oh!Ax4!23=EJ`KpPO&-AK8Uyed0w{kk2#*Ojvd^=|oS#rRffeu5 z9<@ahuQ6 z#kT}tXLIAVAuwloPhpWlfQ#wml)|LTVB$jif?>zm4rWcVa(@vf|5h%=HNmM=!z~X* zyhZ~J;n}7guRyPCEqPq>=S(n=V+lx2=R~m>X%$Jp#IJ}B>vBJwOY$ET`igOH;sc5) z`wI*g<$OI%dyexq`_Rfts&2xVvfSC6{6;_pRD8UZ4e<=@;N^+3;*BlT#o>ZA+qN7f z$LH&93kj(2xDp1fKuXIt&k9b^-BEsv)92KBbghOo#G<6Yq8-a?K9GdiIu+tegF9yr z=&o(X9xn<;vJz!VOeV$VRNaXNY}kWxWJ9c4q}f&gaR^Jx5a+nbK!F5nfdR+boa#vS zw>}eM&ESHXK<%9|?KMk$jl>tU<@idjYv#>s zn26o!-Y4U;RMkGNx}|6uPeqvBkef6?IX z7JL{W!3pl}5L|<6aCZp=f(}lw;O>&aog~-tvkg5wNH!0l~k8N_|H4NruXam#Sx_< z&*z9_nzCo>%O?u>fQ83*o9>>om52OP2JeP;oUX2`8)Az5jUKg$Y63gyAD`8fom7Us za5eCU=(W;^$j@f(#Toa~(tmcv-fJXo-L0CO7;JN>T~%z^j0EV2B+?{ z9&dMJ44L`OoazsA-ViF=Py0LKLdEe8Bok*JNu7#o{Pp+bd1#v_OQxzh-sm!~GuL|j z&{Z?g(Ob_n-rTvK7GytW6AGjAoJ!JEe$0xj_J)<@2$|;(93B)leS;O)*)Ntfq5QMk zB+NC$Ae5ip{$OXbN{%Znt-EVaSw&Xcrx5z)ZEY)0O-ZaeMchnV&Bqq7WKz?pdQ$9_Jy^$< z{JFEy#W{55c-^E0?K6$&d<)+Onn;~{S5RJN{3CX-qI(QY z0rvfP%4f5Fs0N>w=9HXjI^f8N<|m^u{YY#0JNrIc!#(@!D8eSv0U_zN~Vs zA}_9*h@5=VroyqHpk%Uw(4+!~v8K_2Q{SfQ;xIZpqbdwG^CGQTrBh`agX^Z!^{41> zyQ;jB5_6_$Edc>zq&jq``nZ{$m;Qxyy2{QLDU|vH%{|KA8tUn%;9qeD40iZAifxdY z#<9uXjcsGgd2e`UO)kdWjSQK|=UIcL%w({#y61Z)5@3;rGauE7OO5*e>)=2b8wnFe zXI#aS@$7-_YPiJX=|0OufZ6A@q1jNuD=ZTqyISY5>{w{vOu^UCt@r0m~QowQMr z72E8Vk|a`n4!lj98{AiI)>BNo)iTE2V^#dIX@+M%%qQh2S!Ft#NzKz#j0E0{7Vm`? 
zimXm&L`3Y(qX(fEWRqR~SdQOmH)?Mo&Yswpv2X5b>&j1yykF?!8fcJvHCC6nP}jTcr!G(HGKQ&hbm$OKjA&3sX&; z`kwjr`4YsI?u@^Dcdx|WuzdK(NExth*Cz#8OwO|H4fyQMZpCc8-oq#t!H>2hq~;{c z)GVhcyISqUiJne(v5`_C!co^BCEAeh?jaR#uPlhX!bBXLn{)|mbarYz>vB?tR(a;_ zI!SCQvXhtB8tbA_2OWOyYbm{+Qbm`QJJR2DPZTWhfL|D_C#Pa1rw(Y5QW!n<{n?tO z`wF_rA#y2rU6Gh`m*`J~Okx)=yk_;d>MN(LEJ164sAANt;%uB9Ulo#(i@Y{mq2zCb z#o9MwzizkNwOP4!fK-}dJ-f`xB;(imRhjeBV%n=ziR?S804u)soIP#D2fr=rqLOPK z)6yTzHJ?~7c`7`Tx}Gn*Y_oF{QRj}HghCZJ(5L|NF&__}>jl7PW|yFGFd1FF5Zzu7Vdcs%piukFk}X@e7Mfzz)J z{HfD+PNTC6!!HBA0rMWljY-g?VT#|i;T09_(yEKb>Bj0rJ;=?*H4_7h9|wM1_?;Da z!t1}^n0FHrncEw3?9V9PT>|?FsNGhpEPi`aY%#8%aXb~L+`DL-7Rd_L?c96cgcb7Yf=`D2@u5u!1RN&qACxIi1@_f;Hx zM$SxDW+$SC>UR0pl?#0h{k=umV!3r@3wEFt%ibms-CQemH~khM_urNC7HU8Ax(XBo`Jbgb^o! z`zF?dPU&IuZ(f}F5;fFydez$T79;SpvwbJUT3Z@v z8~rddqdz`UVF4VGd9`Yq1+GVYz3cq;J8ovw z(>{}njR=n~^C8x+FeqOLA^6{!R{xz^?|Qc4kJbO@nfgL^!mpP9N*=%Gr|NO-p7J{0 z;5+(HN@xWe<@2KR1!emU)?LKEQ*qb-H{^L4fL$P%-+3r2AZVKP>rr?rEeH{xmJ2_x zPkYv}Wz7iCUN6-|O0>{eBu;4yi1bx7HhULK(zf?MEiIQb@9okD4r_`ntu_~>^Al6d z+zVf#SpPYn}650;Vlv;{|0zAT!PHyWzl~p9kO>3^x+<5Z5zDG)f>6bl9Z^?RV z&6DuVEU9`Fda_zNCXp~P0lPzYXqzTDKfpSWS$P{*D?AW>hkN_=S*#o<8C2K?RMTh$ zuxf4V+VXWTdvor%3Wv{`Lsq@4fQ>mUoO0kHSmuVbL$n=zD&Cx>cTuFL}p>UUHbIxm@mh zcPA4P8iG2b}{BBXTaMFpatX5@=HqN z_=}A?tCeZr=et$@s>386PcSGK`!S|K=>C=5uOIRN97HEA6d=ClnXwyAPoLva)}nRR z2#ST(sI(d7JE*T;w(&E8N(;ggrr5b-WY699ja00Nkz&SmZnSwa;slx-++;Jw83i+i zubE%9cYGExElN_b(LcQ zjZ>$iDjO61=krVy{;~vd^Gr!4zif-bJ&X$H$iM}`8XGQb1c?^S8IhDLf;s568`3+y z_JS30%x^seLgo(Ar=s3)r?3Q6;{T?7vpwfy#Ok4|uU*+-6zcgO2n5UIKy_flHO;kqN z@6R)486#RF#L=B4o=(3EPwlET9OCN9`T6<<$2eUVGwxh^`Y=ehz}k^i4(UMXUix_TtMa?wIK9Ys@( zslb&5iLgE`cm3?k&S$G;!d&eeK4B?onSHNynq&v+P;-D+!8U1QcRI34nYtTq;W0kE zuiv{7z`>#WN!`!moa9o-!7BB;oKx&REn}f9ZB~Xht?wm*QmT@w)_kiC8{tkl(Q8&2 zfA60qUn*OS!VMYR(vKSzlx&;#T_^h=wLg|_^Qe*=vr<|02gEVmeiBZ~LCx9us5l3# zf>2bhT4ZZ&TE|kpV{Kic4SV;4pfRg^9PI^bXrI{+r>m9`gm`_}23CXHDM1~;SjTa5^ac*Y5uvy}#cRVFM+6~8GB zt8;uz@Egf8ocXdoEVig7YHJN6xEs_veq>uh=gIh4sE^<^9lhqujs>nL%#TwvdEviu zN?L3VX3zW~_n>m!_bI*JnMw+M(d-RTz1v;UL~T|RM~p@$WiRdL3Hj}Qm=Q^2S!88{ zare~J>IlUTIqHk{evdm{rdC*v=dW~}Vb|%=Y)cZYkmY8Kc){M@3iE6A-NDGRqN7u* zT#}V{YYQ&G%@2K+Ni44uDH01)hzPE{=&w}pr=(mmYKgxyA4G8;o@zilDz|^%ThE_F z#rGc(@-%0(%@4w+<112PwtLG~MiWB|6t+$$vSsY~_o`B>t!C+^bjJe-$!f$lYWa7K>cQ;NO zqhEZq&*S#ti`94Zc~{dE68F4<2_z>f+pM^`7izwfgHyppCQsyR1u3`s=~$#XGsHBm zOw1fPIVry_D)^j;uexI2eh&R-gqn_(p=8<6+puwiHTQ{+*+dl8?nFRyEq(dtObKjb zIc?yejv<~UvIa-3t*ygf`BUHy<$>bCSGVojLaml5KeS8|y%009e+1yQv7b+uRp0Q8 zo%0-?RP!KlV;>VLn?G(y^Zo}h{8x%1#;b1b4UuLw>XgL`clu4$QI&1O@RP~sna>N` z`qVDmrgarGennBV!(0BRltnmZ{4s35>r?DFjL}h ztkh0EKRe|9{MOUaN#wj<_0Y&*nO)_0VoR$1VcgJpG`D)Xs7i^KSi#<3LtNn1_o})x zw(q*3>yEuQ&m0YLvb9i0?0Jn!m%l*yq-EmdBlt<3{63_>gk9@|K5(sIc~6teGn33A z<`|O_SQjG{uY5zSlLdA0AzCTZt;hMh!AVr9g)q2NIEvSpk~w)06*0oyq(2<^-x_7iBsTw9U_Fr1rvhH^nGe zsoNi67M=_U3-yZ;6PH5+aTL5bl4OQw2c{|Z z3caXzvx1a@WsJ{kwc7k`PXy=hyhD?geiM#vl8g&wnrxGeSgI@j!OwVoW#UH5OD{^x z>mFAQZHoP{Yj3_`LOVGKdo=(kGJXzz^w$vpo>`BtwnbTyUG)s>P`NWbU zJgLP`N2ebMS)UqvWD_ca;uEa+nSYB&kV(vLGYMSk@xowy?*l&h zp5Ze!Psopl6UmjtpAkr2DJvfY{mkr6W3cZYdVy!B)_UJ zxV)W=JF`y6$kCGTO3Uyc+RTEd@5mQN6AG@AO>y)68o|ediCOEd6nb1^+Be{<%mav*=tW=(aHMqxw=nm4KB0qY_ulj<*@tA%NU^BGoon1T%Pgi_1$dA9;UHL(wP4wG1RcARX$0|%DAWcq)+C5DbK(du;a zC&N@@9eVV02U+I6fe|A-I2}HfNijSxVVgfw5ha%>O-5*XIyhB14{jbgWlu;VjW{d* z9~OlBzbuH9vy+FFlZQKq5BO07#I9^*X=^6w>;p0YzP|$Ta&Ut91UNxPAa*q~Hy|U3 zoB!{O8dmPko^BRa?w}__B;(^Dr|AJ0p})S#ae=u0rY9;27$<VmS48=EfY=or z&8)2?|M`^q=TqTHT0K>BJ1dL7`RZ{9fc|C$PEY&S;qiBc+(DcGHD4FNaH;{s zo-F1+hW4q*zYMLUt%tjsl^d`D+{M}HsTue|>{8AS&Tg76W)?tEX)7;V3o8vdK$ic} 
zCd*sdTHAPlIQal(n#yMG_8{KBYpLmJ?(rA%zpC;tN&Y20yRNMzV9NP`Hu;xX=j0IJ zLrsB2DEI_XtHOcX2xDY{QNoQBFGMy{EPg3>7wF(!>Q zqk4_iZpsF?7Xy+;igg;&y2B?l_?Fv%l1OVxI+movTnmxCEdAoncQRL5+1j(dMX~m6 zr^%?FPsSebSE2fId&y{88ykgYBNx+6CZe?fC9E9-F;RGwNSc{B$-2wR7%J`4aV1fA zvG{2VK>EGYse#ZQyKQF+GBUCzC*}vSKes`}uX<=LK)Qj{oGU^1LF@)b-vOy25798< z-DLZ}_LP*UexBdH!sm5Zq@Wbi^BDRf!NIWc>+#YA3@)gyu>%C^?Z<>fVq-ZU;WM%5 zTQpW4i%iYTuo-qpOJg*k2iC}|T3W_TH!7pjgw7Jdh}2p7_LZtGe|N?byi_f=dX2k; z4@}5z#B5~}KUI!P4ccKt%>L#@0g@X%3)pQ1w7p;3?c~`=06gXW04YY9ebaQ;?H{=a zO<<;}eOTq|zKNaYQGKG0#*~_~dcMo`=V~;_HgFPuH`s8OoND#@*Pn&G!Iwa_91w_< z;$7GKhsPP$8~9i`5||%^p(>@5d$&(d58#NDRNW45FVrpi3mT+oMTXt4!rTJ|p2RXz zgPyeE_Y{26Uq6#(RS!x#7Y$c?RJ1tP#4( z;lF690kk$}PZbFR^+HkHtP(yJz5(*%x&!i94>s%$yAOuR3G`T0^P&KB=*08Qp;?6k zaeVP_UHkZH7qgmk0f9KeAoRG`lLpRwLA#5>&Ot@f#1a4zM6ec1u=w)g&3wlL$10wo zZRAtlU?A^o{nf%&)EltKNF;+7CLTapwoFfOFk8s`XsP{Yr|oj5x9XjFnJ|ewvmMZnm&d&N{$s=NcjK(c4aO8KFhrb1Av)nRwtBl%-Ikta%DwL!W@e55zGxjTg z_@wT8qN<~l3e`(xd1D@Pv*i7Y{>2ML#O8&EXDbqJ5ZZ@NIk_J7tke;Uc) zv3Jqx4t&r;-3Yd9`J1Vc(UK=tSz<#2_necB%}Erp^4eJPj}Is~c6N4d?(V0@o&Hxx z%bn4&mxjqf8k4^!k$=gC3DC0CBR@5=+70!e-+Y0DLj%-7!v2>I4p}1pGrJyC0&cyP zozFfAxWC{X92^v;Qq2{8r=g)yW7Hm=u(q~_gh6b|u5q-`eC$m&8~AwNsyf*2eTM;A+Y^Lf>d3pJ#PoLCSW}^{QbA%aB2jZ!Z-C8MFAy%v3!Xn4I zgZ_N~_>zx@$G4G>$<@_W-sNBxdU0`aD{Shlpn!&mNr%g}Jy&ndkC^s>3!Lb_J))2y z6@e{dR~NgVGKG7%^7R9%K+vNwJ_|d08frY1h@=KCQ)IXcUEtlN8f=Xb4wY+`y4dX$Lm=o5;*~4Xv)cOl zkVy$)P>Cpx|K-8g<@@`9gRpQ3kaV=vzQwo8hr6qHgi00`v<37pUsChCJR3wAx3jl* zMaA#s-9S;P4zCNiJ8YIm_kI7GlPYY#Z#gCg6A}ClRzE{HRG^w_S6}DzX#MM#o+av2 zw-x{0{iyo~cr;z9i?_NpoQaE*pP#Qrg3E*&Ow|HtOB|&T?)K&3!ra`P8l@uC2dh)^ z!81$P*R4N>lr2q3<+KM!4D>C9SvOA)B(~v*Nr<7+Z2yz3cc43gLJcD+x#QW*+rtOV*?Do-odBnGmTboq1 zL4YFnQeFEFz_0xBK3-mGcK*fQeUG0=L-d)I9lW6&BDEiITB{e}7bY7^R(xw>DSrsH z7|7EDYC{`bphqR_^WJEBdU}k!{_Sv*M912#D{;hS5+dxGkuL%QsIsU~ZWAsouiKac z3_%(k(1KPOcM8(cNr$aZq_UelUY}a@R-J>K8Q!_gNKM(?VtvB1JZ^Kfy0kL631 zZ2s_O0a`th`aa$QPP0@c?92PKh@QjAQkCygaI5kfS5(RM`0$LtjV& zpgPSZdU4cZ*b#v%U*H@7`#`AyM_vL!O6tk)>uyUl%X&lxp(ppi}1qg1{xX)T;}s`O4bz7rV6#l+5a z2L%}!QSz$fiaPowpQEpuL>6adQP@x+W~9~!FcT6JlWuMN9DZtGcE^!{iLtTwHeq35 z#*VnCO~-)MI|K~pki6Ej>0lQaW*O3_cq`zoV>>cb!7!=&;BVmOR>lNvR(dxHisHo{ z`RVCpz<_nvDRC^Xt(9xAohm&>4xw6mJ84iz0X_8%Gg^RfOz%_HgxU z|MxTX#DoOU&uGh9!&We)Z+nRHp;;0M2`NeBzE4}p)YTQr#1~lxR9UfQ;n2&b_DzhO z1Qf3*VWVX+Q)h(x$r-g8|QkDD{Ai}E^ED%{@_#Y+6y6cbc=?iH1l?~n)AOb`_#lBq6s z6Gj1b9FvFj00PA1`t0LuXEpz&j{Hi0Fk}doGRIqLZ~-U0M$ME;GWdzhI-o0`m)`j( zB(*WKu(}EL$w}bmo1-B;$w}X52dJDq6)DzC8>w=l{X~RvY?{^RJRAX`|$3 z0CP_vjhi_tHXFnava5yHf`kQmOM!s_Y37bGsv{*#?V>9^%iee@OKP=XEoDEVhQ^2e;(14cl zlI)w{$`Wwr-2^~j&a#ew`&mD^vt)yw(alo}Ip&$~!g(D&o^^Kvc-~()ClbccWdyL4 z6htH%ik96nvd!_M7oH*7je^(_v+C#yJ&D6rre{p?QdPUu`fZe9R7^~iOP@GEarKEJ zE1JDJOC@AR?{L*0r?J_YUB9ED_$7=05AP6i{*-IsDVLFJc)KARABDYLKVs!W0DHdW z`qs|uSFkR!Y@a~($jJ#qBynYB##8k;R#(|tv|MY8O%6WKFD#^^;7~c--D7;B#P0qa z$$(3>OGWtAwJ9`V`NtjEn>TL=UcB}>?!4mU;yNW+K%I(^M@-1caq>)!VzmuKSb@07 z%0%U|>NVa1>&q&$yNCJTZ5^!94=r+_Rgl{xlxTwuhYR}l$X^xky!X;O<{KP~+ zA9ZPILOoJKLqij3zTzqg)RoZybRHf8kZtzr(dUz$SJug!exR|S)tcGsuR*U>fPUS! 
z2MjYRVGMI(e{M!`(d^zs60);9?WQXLR3hczac3i56qTGPI#MvgkSz^EJEKMLu4bbG z`dz4A3|ShuK<0Zmj@bQE{k65VM{`8>z4FZwt-rRq$Y$wc`6W6rZeWqaN~dLHp3DKp z^~whvX&e}esAA>D_BOR>K#O%Kq})TmV`t1d_g%>utw1uL7?8bB_Q~2%N%|QtJKwx| zWBZ}npt&ouCU+z_3nb>Z1g=wbbMnN)2gKOuD+n(y`^qFBAYf%>b#?r!S-&V5$Mpbu?qtJ<71h&OreB?CE9TW~ZUBi;56sP8nVKs7My6NY$b0CXo}MjiJm0&2e6aTX z`ZT5}>V$1v?tRFstK+Qf12$RwtT9q-`0W2QGJ;~*&AdBZ2Rd+rzGjDuORQqf_w5B8 zUHB)~_xZW5n*hYCs;OzxS3buDz(^8L)b+)ao4{Y$Ni!N59)18&L)8qP2zm{P?(pBE zK@hE!>gM5~9Q8XwerCiAx|TgsuD2)3PtF2AeWx39aCjI?r4_s~f8nhE4>3?tX=!C~ zi0J)EkLf-pX9#`*2kZUDCqO_nTu$JRi;G5I8j3ioX7DFV&{Of<8O!9(8zXfqT3VKk zf~abiWPhrNt7^EPa<|~Nf`hDoq>tatVD7O2ffdWI(l@iD^lv?c)2Gi@d%FNly{K4=w<0=mm1VaI9#JR zPNUR0PR)*Xg4xo-yzu$In4`$F8F8GBcYZVXpR6q;uKXiv1L&+y<+IFSe2iI|77mUe zH{Uha$~N;g{G?x(y?q8wS@*j^Wq4BygDdmE& z%`*bGBPSXBmE8CJXtmNGRi4DyS?hwLN)ttoRu_I zs`%|jF-#X5bAG|g_=d_aMfv#>2TYuTRIz^+dSrcsU4a0J-EZjoApHYVeTS7b3uw=> zZ!=Hy@W_Y`#0nVr=~yqO<}F%sN5>%Ton}rTx(4`Uq&J(wIgfrp@#OX&0Z*mLD_0Je zYL8v}CmV_686gR}Al9D|uk8}WiW$D2-m-ve(DR3j&gKgWrj8EN3APFD8UFqqz=q9& z-A#y_;S-vgS6BB&y9hZ!Pxa#44Uf&NW+R_~Q!^=RY+j}$O|LyQ3B1w1m%&!D(QPu9 z`u?KjeRD)?WV~wKe57A;n#oP|2V#4vUh{~~-!-aA*H2D^RED1>NH?MUPCW_mL_s%0 zCM<0dnd-FQlhvT{7l1unP4PXFY_Mx^WrKY}}Rz}~b{8+Z* z-4QE0@ zbco$WW}tNq*}zJ&M7p;(EKF{**3ioHG(n{f+-;h`H9_7{do4<)sGtDd6Lri1=`S>5G?)AKPNYqk@l=Y)A#*_Y`>cH%};@;PN?e99#l=tQ+-5+LME zHuLiG8vRw&p?atSu$@n-&NrvIlw{!*5uq$VV)NXaD%aUA`x#OvgEsU&HK$v_H+Mc8 zsAvPTfmEO5L6`>1S;!Oc`8!ieK8uLhbtc<%oSxg=%nN0GgP-`n*JrHTf{}%#K~SiZBKb3YC5Afm z93FnM{`|8MkFAGu#uM0DMqC(OYWD_`3u9xZ+Q?;k@K}25t?t{vF4Eik8~4iJ_>pDK zVJor#m?`@ehVQ-?85^6;X*Jkr|8qXq$JbXtv5SDim3BRbKRZ{-X|d%L2yL3J!xCu~ z6ci3eiP#XM#C%S~!&biGo%NBJG;oELs^$Xm#=Fz?fkUx!!9@C8j2m4R6l{tyIUruh zj+oLFa6dUa+YTw}*L3X%EXb{Z-S$W}37g>;Sz}{Bsn_Q}F@N?t)PL~w z70v;INDY5n#DJ*iN^zR;8P|@I5#yKg(Oj`#7ax&JtWSSEMudizr2|SQhqbmW!GE+U z`tCNJ%I)TCb7DdxSDc9YY6KX2&9hWqE?9bc@|=aoIxaNjt6a1L(Mn$5GfM}Mp%Kzi z9MCXcOPOB?rKF^^>j++|NPqugV0H%tM;)xeC05fkWqS9MwI_A_Nc@zi2*gn^GY z);q7?WqbzgH)hCuE}wIbB0=dR5dZ4bzgwIiHiy`~T9C$U@9HuRV=?Y{P7{28Bq{bG zEB-Xvd=7|I8O184v333)5jwFtR1Nr%&H-i;BqLap@aO~Lw)#RF$3cID&OTrfUIQV+ z`CMj&H$j3~ol=m^$A`PfAi&36=4y$f6=tc=!%Sfkf8A3c`GQh?PuKdW1F!S5Tj6c&R_OQl_Dp4b93(1}($i-j zdkoe$zJJuHFnM>OZSOjjvi72)tjs2U-{B+B-yRo&lEpC5ypY&2<>JG2xnJ6CJjJ;p z0&X_bY%1TuYu|e@rCu%+X|NbHJ6#m&H95?|OMP2{+D$X{0;*z~$#@6HhGSd6D6RxA zhBOcwWIdcg4=H|-b<)?N41x5CF)RborOF@~Xx?VTiieFV_%j5Ej{*9${)=sS>y^VI z=)}G(Ig`)%t+jOzlglxvcYyPukn9I9USC0cj0K*Cn%dav;uYboD~l+TI$?BRc&g|A z3>GP;bkn$D&$txV7j~Hc{MngX!^<%QpQLXDe(LIY*mC!@a&b=~=kI0eg|acvp#e8% z+hjKFE(0_tA5}%i2M{d=;+DGJ`DC5c*ikqOKK&juWp=@iX!9k@Ryp&T~(( z;NJt#m`-%$thxJPm^&9{W@on-L&`HUh!6-ca?`T0btF(_OE2S7sm>M~3z7$0shz`46lfTCT3AAxIdwC^-D` z=&ouQYeJ?1u$KW~^R;p94stsRCK)3_DhngFGzWIW7Tk6hA{Pa7qR-q41Whk-C`*1G zO^2(}I=|w**A9L~RTK^b246sT(b4rgoaZ4d=@47&^ygE7XnrA5L?x5DE>BJ-q)5-o zIxs;%DFIFefbJM{10gr!2ytJasvg%TtH#QZJY$y|2GR!PStROU{?DYHn`PH6u1}#j zunfl_R%8kCThQ7QM5LlWPkpN&U&cNg#SMz{Vn{V=_fi09NCg;1>i#wnopI_A>g7)~ zlm1|v-zttuRg5QL%!g(}YWh(TD&B)hDX8W3^_wg4A1f=rRKPTRX&alddi%CXM?#C` z5r`sBVYOQfVBce0fVCNkg75U(=mMBju!E&S-+>>1S&G7CvwnFv?UE6s2ZY}}zY{fB zT2f54>bwEJ^qIRhe|3T{y}v%M`or)#3k;2F&4n;SdQmOu#k@g;*H4&;J-74Z+Zp?r ztQ^sIf;2R6!LwoKRx`;XpUTSn23OE?t!33&?yip0O0`;?H%t`K_z1n;0fP@10vn17 zit9!nlH^#@{Y7n1XTS}oN60~&%PM?;AQkVpxhcZDEChQE4sYeX)@!=J-3fZ%_7Ft{ z0JHtbGsY$5V_&4WTyv5!0x>QwRp_FIMTt9nu>y?%Ve*)pb2|u<>T6UIg&0S9>@0ag zj2gT!%n$bm^`i-(K@?RpdFmX(VZt~WBMQ-g=Mn2nIl{j1qe#^u=jubK(wR{q-;gIb zx?W17DXG&+t}K=o=$UHyoVUBKBa>O*w>vKI8Va37c~MOZzPt5krFT7iB`{mjaMI%9 zDpdwbMvftFLixk#xu=!Rtnt2FsDTQOuuHT=*S|5$=9LlRAY`?SpAH+LU)=^eIWYnH 
z`E`n%oZPkhOE>L8i^wL2^fJcRax;Vwl&dgZO%{19J|vG9aWaVM#kvGC zsB(v2K+xcOkZZ(cRdA1M51T>LkMsG?`Ah?fkhf2Byj~3*YAM_2_xJaJNnTSlQ{Pi` zCFC9BmnT3Wje!qk1b^fY+d$VvS&*umJ#nurK+YHTpLLiol7b*zL`%6~R2t_LE2XV% zeXB-ig&4K$yIz@*=qq(@?)Zn{qQ=Kd%A!7Y2L0)h(v+BGF2=ej7^I#tuae_d#;&G) zJ6WPE-#cEQ_Zg}e(eTAs8ER=|^)1$`uiJL>md7i&9Z<`PmxuO*40+7P7CzR&2w zuCy7#Yx!1Ev0qe7i9QQ19`{2&oaO~m>PN%XCkM*fO1*B6PcaW}ll{mLd^T4O{acSC zgK2mi*89IAW4ksqSj>i771$>E&)6)N3|Bz~F6@r&Ojl>a!EQD$mTU=l8ufOGZnrX> z&)Wm~p{QpL{8od6^p__asK+sKI0rK0tLl^sHUu4QT~v{{HZpXm5vU~HfcIBtQKa4V z56iQ)Ld1Z;ySkt@xhC&m)xNOF574Sj&|o3wa|)H~=?{9`4gw>Va&1jkG7>Y8YQ<*Z zF1x=*x_0Y*&$(hZXaVwzRAQy*zY^wl3X85B~B+Ma~dByba_Ca|zSB@EFnr68kcq z=g;g2bRcc%0IUI~(LR%)lQNo;dw1?O`ZvN6afTpy9oZy`!%7$5? zpC+eT?8dGp$JQ;vmPTAx5)cW#0WH$>!Bb=;sG%b^BUB&{^PrP@MXqUxQ2GsxGZSma z0COQ_7@B_nQbh#XOnCl_^{kNg=F?vFKleg(|GI?<;`b8Sh2c#AC&yXt0wm>^OU@%T3p!} z-6sPe<`P)nioYvM%gRuRP-3^oLIw4W7T@_%!oLLLRG)R}6Gr>Z)x@LK z(u~u+SX&8<(Cc&D{3t@*7s7wrMYU+sLUK+kpx%l-jB<01Ud{{GALX%8B;GFrt4Dr6 zEn%8&+^)M{J7rCNn4;@%DZ5`ZTCWZT#kGim>`+Dsb&GNuPZ3{IQdi@p=M+mpWT?>8 z$tt>BMhlQB^%DmAC0sh-Vg9>A|f21{7Tg8>8o z(k9yl#f-!-Q-ekJH?QN{*x_9O$mEaFWpM7QWR~Gjk`07lBsyhGO=&rZ&AgWsM_W-; z-vZ_kWrSk3t!%ZEv2V^NRb3-HqTj2iL1c{kb}ZIhLJLU+Dma230|;MyOWbO5T%dCU zMkkp4WqKX+7#~N){!lPR&U0@~E)?hm-tNAX2GD1A$0%MOIAc50xhq)hYfm-5m>Ww`_Ce|XD<@i!)9EXx}0N($@>i0^|7N46fr0HNK z*%S%#bxv2!FMjGNgF=Q+>p?Pk9VOLaCt6|MO5&fVczs7TQnec>^-5)D9D8UP5%h%i{*7@zt!rbS>MZoaCK=aD#6XMwxE>;im@BM4h! zsk{WInmC6fFID{~b~US`CiEFvZy}gTX&DNuOdlLn9H=FVwiPMp*M1izdevkAeGklS zt_IIf&BuD9=u7UtM}{$J5oZu419unWOySl5!5cuS3=tG%(R-i#NAUxM{@LMP-zk_d zTGyUSeWm4!a=r&OhGK>d(x>NY69f_~>Uvrna(&YDifdN}zxRu&IzWNS#Wc%>zCV1Z z(*9Mf^(pB*D?~=HXe{#k7^6B%$(G~%53)?nn*)^$8y{5xi&EvODf5Q_<|17dSX5E` zHX(QS$*~P-iAwhE@t?&^SnlX>SYlaY_`7C)RZQMGR%fff;uK!nLs`@hETD2X)Bg`E%|NX-uU@xZnE zjp!)fIKF$++n^VXi<<%tk!Hz(>QgSP=?iRGTucCQ%@Zh+|597C_;o2@NSnlnWmJ=ex;Eb>Q0>vO<>+NpAcwN2Z;~i-k@}vKO-bCs9E2VViADDdb zP0`WOcqqgL6p(9FiZBXE&uErYO&tKkcblV$xBIzz6q-wDB2>PohsEU0wmZymPLZHf zID5J3DCLmc{sJ3XPNyhj_sUQ?qBnYnV2!{(vL9Kei0_tx28Tco>EU-w6px zf-%U>(pBL_?NbK#hLZf=efT=OHYQtIrl6&k!}$*VU@(6P)8!|w|3hei(L;(W1X*5P#new}*=m{+=JAsJC;#00QL zl3VdvS%|^&uqa}wy4ZS{wJXqR7#%f%cu_-;C~?CVU~Ng7OR@Myb%s_1ArhHFIT8R- ziK5An!>tv39ix%eBsUHlI(k2z4KzWi)J+)d&?J}4x5w%~7i8Jqsp*a8(roRb%CIfC!hU54t8?UcI zNn$-pTb3C<2n$!64G1{XTvt|BUJ$H{d#;BQ-IS`~nLgvZqkzbC)t-JGr4WMO(-KaE zXYC$+T`Eu7x_6F<+JY!t5vd^VrMUJAe-~BfdF>L;E1cYY9;!$K@q6``XB2>Q-{*>4 z>;H-n7uYYE9RH^stt9MDJxK1?&y%QfK!x#{rKuvGyUGTC zFEgU;5X@M@zHe-pWEgKD{wIXBr?)pyWZVxbUq>8Qh+zz%;?%!E=)bU4bPDG8_VrDs zJZ~|-2xG0ywL>Wd0P2nSAJA1m(-B?Aq33{$DMt-j z?rHCrun#VQ{{eN@WS!geI%s6Df53NnE=AdHXGS8a#Zvg+(B6!R{l4Db*uf8DLL*sE z#PWJeX35j`)PMa~xSI(AxO-n1B-Qt*enEe{$v2gvVkR2l2fK8r$mVZAKPx*M6#QHA z*H>3B3zlFriT~ZICro^se$B_~-G4e%w8+%lytjZAU`^^9`5FPo1cf6fCnx#IaTj^N z{&}Ju`iz~+Lm$gRm}%ojx`btbGXZ2D9AT|lg)$B%cc|84wj zuS@+w#Ho{7sShWsx*BHKqwttSq0`t)`F*bRe=K`YzGkh^)cK~tO3B8iKyTjjSMdcQ zF3X!PsH}3E{;Y$+b9wm*tUUNA>+{!r*!lVL-Eor(D3Qk)zK8y9W*Yyh!Mrcz`A#lg zJ|(GL;1kf9N%xvi!1v}7(5EcS&D(nP8$HLj=cdWI9Z5kg!g}0i`<0BmjBkC}cF_2m zm;mZF7WX5|uuihiPcI9re*oxx&E4#c>WFeRL(^XGKH8Z=slbI^pZZj!Aq42V9}J9(QdhBP8;gXiIqqmy}$d*SvMn(aaP3`16_zi;7WhkFP0q@!W$ zNK&fYq#N>eLit^8Ysuy#^ulhLM6R|imzO{3kGSi}-db9QM0}#)8zL0wHmhVh!@-TQ z12d6lU#jVo+Eo0lprCJCXy#E|n)euMX6KJ&f|lu8p(9ZKO{z8w82>IV(6`GUZj6tw zSL&E64K9xI_t#$wjKb!LT{=8QCHn! 
z0|2sMYfCFv3N9W_3N{TlGgmK1pis`<)DlQE|H|m9nAw2MT`Aaftlzs@QSfsEkcwc4 zCGgC_&cn`6AtHkI4~L!^N15o3FXvN_$7O?PkQ69>*Y*q(;SvW)d8Ym1#Hr+YMw(7k zNr^(pfzug4yFt%WM7u!rWed3Mi?P*gbiL3()qvPmihWJ-U-aF~!uMl$R>ufvb@GFMq z>+43bULfF5*hbuc;n^%{A=YU@&<*-%sFSB&F_LKb+#(bm!NfP1RIN;VJ0!rI6@jkq zXIY9S(}tm?`vgJ(x9Fd@7PCV>0+WZGh_c2;VG8w}RVm~G2-fmz^iIP`yYUir-N+Q= zj1hN3JVeJ2?B2)-vZ6e+1^TH`7ROHz_WCM=!3vu87s?2+Mr7+0pu7k01q2~hxwT)a zK{9l76&GO&SwsdzOy-0r`$DMF{_Y0}VL(CbRpzFNqYiNZjcT7(gQRCPdcfMt5Bvvn z5xm6+>}irl5R7qpi{lpvUzPu8Kk>av$KZL`uJRDa_;b#i-b7D{6$>Q_<8i>ljAg;K z1i?360VilqLNTYKPfn7^@`gY@e#@&@3AQ&n#4pTAs>5wbVxjLRy;tXmHBa=T_xyWu zh^7N%_=*s{6kfG$Rj8QF3H!E^b@03OJyvl2sSRkQ{jz#ON2Fcb$#O@H8Nq=`G{}NE z0OAbGfPPF5!S@MT-q9O=EN9qBe7?!@RdS4U{4&n_`;qV#ITBLp$>EQ`i$_4(XW6_l znM|#fj%64aoLg%iO&?on<3D>Xn^{e^X5CUhpd!GQfP6$Hc8;gD%>5{Gb3Of+6Q1QC zwfyIE^h9J9^5%Jv=XIB`G0CCj7Nlguh(GCAuC^*vTSufp!}lcDyyUxcS&{?GKHca$ z)6{Jyo*9f7kM)~J^C5)ogx9>Jf7Pegc*KLRR@4YEVz@p9LXN`xR*`6P&%_5h2F0Kk zlA;a@ajG|X|ISIbryp7po;Yw$R0vOwgWkQ6-#H27qeKlXV*aaS`E|VN&Fe4z@sFq= ztF>|JLf9&?u?eyoF9CA9+egT9jQ7X`9ZiqOc^&#XSZKR9q8KR|zZ5Y%A{bo`FL?Q& zNEPPKNO-OClFK!sl{Ol(dUerN++QSl;$LQ~!g`WTWU))&r9cL2ZA(j7$J=)-Sl6CE zg1UTpkYs-c9RRHZbSeNe+7g&Qsw54emw(T6=2J5O z&P3XjC>`Yb4mT>Oac3T&aAo-dMtLm>RI$~O?{o7|-WJIG=9&N_u3}TW)ar}Axy$&D z_}ZA;I8@W2yZ4(criy-kEmU9rbR-o2L6Hl4E5)&O&2o~&?rHp{t?O3(;@L5MF)z?K zjz#Q*Pk++UfnGQ}-IkNs`vp(K1TrIct5Q$@7HQXI;nZWlq}&Sk)BaoNLGh{r?b&ub z0FJO&=r{%&*&Tk$V`oo@s(yG1Tf#f*PhJHC`s@x~^cSJJj%$xE{?kAo$Q87Xax@+; z4s`4R05oQ+k!|@SI}py3Y#gnOI6$G3|JFHBtp90PioZGT$f}1R9*2XT4kq(88a5lw zNj3%UsRQ%J#aoCa)z0XEM?zC;I&)ojCxu29dF$hlNF4BDYP8nv;zjq|>R71;hKjS1 zp4e)fJewESPo^QQl@rmS$5OSGDF8a&=p^YpQGCm<_h4UpJyM*b2+{Hi3=BD{wr%@N z@#j?=DG%yFW{OYc`Trh>J4YWWxBQ0c5zBdK`revw&%dQ`7^Dhsy_<;r=sy5O6#7m7 zILzk5x30OpMcTN4^faNITz-&he6?*}omFP6N03;&2Ca#zg)v{Y8y3zf>;P*Gnj~P9`J@u)1 z-Q{$aPey9wVG8;Z5EUWco`T!yq~>=M2kEycK8|*K&6Ku?3)(G}#7Um@zF!Sai-*GB z*pj2hq84-Pd!i=o&9fE-tt_{RGsxn_mt_OI`nHRv4)5x?68pCM>?U|#bp8OMinjT? 
zOSALvT2QALcDVQbXix)R+%KlSoM%~*ej)~)d*1x~1JMf4v%1Uiv4TcmA~={I`=|K7 zCA6r`70tt8r3=FZBbCO2PhtAWG`n+MQU6b#FoXW7a1Q9;3GB)LF3H>KJ3QXDJP$Kn zBl?K{4|YOY-X?DmHclQHVXw8a`mX%v{#Wn(eqA+J8BG{+7n%HvS*h4hj@9vrz;thTwI!V~V6X89=U;uZOR$Rw(#00`v8r<1{J_%YoUS6%8nBHopL{UCg; zO?Fz{IYHe{zjKpRtNo^`z-z?C&x^@5-3CxXDSFFf8#zK91L$5!{qiLzJJ3n0X*f?I^N{Q>^094k@>&1L>JI5k_yNMJ&$;4;R z{xuYj|6@$4gY6(A(vRJui0^E!hXGgvl*capDgj}R=mKoVUz)D4j zR;0=_^sHn0Cm@KA`10qN&2Lfi2K_xF$&CJSD&D&PGT2Bm#RjyeTNse;4?5yiNsZ_GK zew9?YW!5Ufwk?e9=}7098%E=j2O+6bay!!B_%gNH$51+-wbsdL(!nO zbl7=Y=zYxqf@Fr3n;4v5%%=QgEana8@X zuW7@=BH9LT^K^3`&r7p!VKARHZQNOo-!^GHzKqz`xLqbJn#bq~ zjl=w{S{BoRwtj~-m-nj7t6d%{K&PoCq1`139GA_UB>T=vV+_2`&qfEb1sr;b7Krn>UHPB$vG?5U?k!BB+ z0!qr}(X;mKj*vObyyQB9>8oCmo%g@RwP6gSgSg3-<+-0uBuXK-pGh+!B#XZ(LLtU> z{w6|E_T}LCUc>V3gaVLzL*;{`lYA^>nb)d0Q(0>9INlZ^ir2K*UV;cF|3JRV^=vA3 z;$Zn})XY!6Bw^}IItNQB5ne$hkxNCdG4~ha*?3!&T2MQ#u(OQTNLK=(ai#-zS;4Rt zy;Oz|HrDHAr4=G$7GaWZsihEwtweayRw;x$N)p+ZUXKZ@u{n6xeVwtUNwQt(b)#vA z7qU6XNMk1jpPD^X6~;;)m$~MZP~(TUNtlz-6d6TQHu{EWi0oZZGC6x!A68 z%IwjXIVF0SXV4&3@b28Hxaq^*$)eMb5Fm888Aa_47YsbF{k9#duEnn423}8_bpJs_TH7y3l(5A}Q1n3KE!wMM$(I zUpE^eS2dCpjU;s4tU=XvHp48C^kUpwTBWFaiEKLWnFUT1r0gIedyZRK{U-=gj@^=S z@7rRFK;rctt;4XXJ;0#Z#8H7n@l9=BG=yvW;y&AN zS3?F+w%l((hK5lvOs={Oq4Rv7HY0gj=oPRsmk>jCg63h&^(H>LMT#V z84BeQPS*lLQschA7=#dK`47xr2iJzl==a8ib1nN#@@sPtK3@!8NW)U8kH@^4I5p*R zB}HxBK{>+-_AdUag0e+9yRMEC`k!!NhOFUCQLi6Z1?APFM;;89Ec^F-hygc?}53xBW2JVMP4w)y=P z0xsV!C|$<8B`!;Ly+s_(aqd%Y#bdvfZ5Cb;>EZy1fQ%$rc|OF%&#{jhy!6~(pvZI+ z-EqH4VlPI4Nk0MBvB9>aOL!8f0E506HN5*;u<&6bZ@sCb584HdAkKbrsBo4^+fy6Q`>vYk$*fk10%zc%nkiHrHs-U9>s%zOQw{p5}4(X*>Y^ z-89s>qlly44c~CcVz0*2R~Pr3l2A*JbH*IGDaWM7LATAua?oE*YFrk!7+8$a?f(6z z*wL31_t6Aa+FPlh6arGu-@S;u`O{DxU-BEnR)1LcYjR1uO6MVUCdYWPa>#6`^g-Ct zEA6by((SxXjebO>sq!y&4YKVK|8TR=kSk1?K)iZ-sxlS_L5@AmT@#)kVLB?tY(l%6 z)^t((@il)V@Wpja{sCocIw3JYJFvmjFqZVMhL`}W)Ewlc5I1p1z$+`#x#vxYA9Cz< z2{(~;0}uXQ?@#cQqqj>vIRloy=)e^9Ptti_;T8x9e8sn_Px#N0@&6)uMEf`Kf6??b z9%=@rA?$CM2On1ugdd4HKRpv$u4d?-S-}8a<4oineQlLaSh-d3N{e{?vzBjbDegLd z&*jamH2t+?@E(bNs{3MhE^iZ-bj#8j)y;weEDN-3nm;!?Ha<7)ITk?_y{v7jo-#+JA-o>O*I;#8#59 zjvOt+VIH(RvXH^a3ipzeLpO=IkKl9iNU!-Ka`xS!a;5nO3W85xLhs$r=Mv$qs%Cb9 zE1zA?!D|21T zT;-RkCHaAocH*9Axy3_%M3GZpGs53<|Bi7St0+OO_OJICkO!>g73 zJKAB%gQNc!Ow+Fy-KXp?7R+D6HIIIf8-vKizx`%1`T-K&d+usW#BTuKXnD86*ME6x zmA!)Gk_ab_c{0BL$UyfMKV0$7VfFVNV!Y>COY;GIj8k77U2pL7*88k?(%%E4H&BAur8|U}pKJhwv z8y&bVvIvnSuZ3AdlZPWsoAo_vyAb=jP;c;=^-0PH%gFt(C((EZ%{ z$^EnK^7j?=I2Ienm^K^+$(N^NtZ}m{2nPI!kw>l z+mfF|kvsVpZ!0O|q6n2S!af^AFUjdjfiXP}rDCtC{=)bEZOY!yQ;teCi5bV6Z>~8X zV#X{v8@?ZS^RUPJ2MDj(G5#ow%ZiM+ZE?hQ3K#pKr?e;iq#qPoLZtad;-`nmB1?o% z8}<=oq|t-n2{MU!q^&s2H*z$g3SBT!yQ7}dC!jAD55CjmwgyuW=Y7Dp6uZJI!%bLRI-CnPLH5TCOao8v1b5Ferp%F7@HTU^=WZ9Kv)Z*#vU_ppc06+eI|%!t0%Vkfl3vNeCE&;JA?DvuHm)&JA(RGXqz_ zFUt6@b3#MZCu6+BZNH$``+H~X1jM97NVh=P@tJ#z> z!}8pb>&fRVwTm{d$63lF(=y^93RX1UiPAQRrKYsi8@NO=9-c)zoX19JA9X+ZwH=}p z!Yt%ucnR(Cmxw_U<;U@hwpO*}%lSucZta=MHKr*ipYYwpv=3(1*OTgoCh(>@;`@J% zq@d;JZPS*-@ocY?d6|p8mY;&>404&FvYKQ1^@w^EPgNlP9hADz%gkJKy%Q?&W*BUw zry_E8#`23ZLzeWlO_M{cWn=@Y9#loH8Rmb(;p~d+uF9k`RZ$lv!faW*xxFoz_q^m2 z7$rm(4Z}=dXXJMd+$JLSGbF})t0Po~soCu_zPD5;bHt*Vk-r|4?JM*nVNCp z1&TsFVd>Yh{+S@%vNM;Cn4!X9LqhXaa{T}sUDU%y5p6(i63FMxB6mo}i`AdNo8{SF zOdhe7MAoR;6URZa6Dw^(#|pqxTAOzcs%z?LaA<}5M7(Rxd&Azl_!>-Z1+EI~O^vdzgsmslAQz9*oO_3;h>j9? 
z-;|^iDd8>J44E9y)J6QHjThNx8|@fp%BHo{(&Tawr7LU95PoyluP(ds4PiK;`US*e ztvU!$^Q48ug4j)>@4rimozx7k(Cuo(p!nlexC^4ZQPS@AK*mCc*Y*y(vX6f z6Xm!@==h!4?DFzf?`$9?d3*MZ`T^)HNodom2>mtp_!7MNTC=7#Qs90;PC#H$0Ua3{ z*OGtqH4p1VNM}ZjK6D^L=m@5N$)~}i<{KO>JS8t^>DgP$PgT#rn|={@dANx5r6Hpr zWNqr|!nu#f(|BL^2)jDwg|=A~tI#K%#?{0Zm^Ck-aqJ$XtX~gPWPL&xDtiMfkuTZj zdYypn2v~cZEww}6MSkMQb0<#A5F0|4T_(1~>_V1GjI%FAa>W&k`#1+|*!-E#@9p1g zz+S#@+|2%-)6b*aZ*r}!4U@zfyX{6!rG)?52gTu`sA3Ky*qJh-Sg12QD8W*C>g@s@ zz=ecbLYzPHXoYyp{SO~2A!k*L@U(8|u6%o9qLJo>QAgW)N$#o715*Q|XkN$Z$moNd z^axje8?W2BOol?%cwOaG)HQ&^xhbQKE%9cdM;&Kq;em9t=_E?@r3>4&Li=o@vd^F} z@!p94sZz9Q(#KbSK*ymgwy#N4S(Otu=$588Mbf;63q3-o3+Hz<;j71C+bT{{#L`3| zXBvJt$vqvY&#`#m96l~C6l(!_;Mw|gTG(EP4JcsBTP~M z#8r%jo5a7gig)a@%U^mF4pj-oGIdwC?H0ZROi(q{qL4pN6NcEOi|Cf4#fy z=y1vA=oJuM(>J&7rdKmgY~C&nS#lTl8mfBxX=~(awc#i=h4fV6o{P@rDYYi7HsqNb zY0K!PQPz4|1|eQFJwe|+x*;}cpW6-L(+(W4Qs8 zNnNUQLMwb>fraGaw$J9qazph7IGdkoldoq25&fu@7!q8Wsx)4gf@BJgN;XSsLKb}R zT3Y7V=H`*80q%2qx^j})43E7+_xsKj*qnUl*Nq5T2kMsdXLfp@PtG0$qh|#LoisGb zForBe)LVWX%T*@3J1A<)^MR#n=*%-{v+Eq~Mcm#*`#r4Nn%yFFkYCW>YF#peW>1=T z5^DRNo)<68)uz(hvUze6lNzf;A;0x6Z&d{D;d`gt2(55nJ9V(0{^ZhMYvZFqSMOSF zK2uxsfIOSYL<2)Rs=XDPvxipj6gRYkNnPx$8&ySx2J%P`=d=!6gzeh>@EliKJC*7m z?&>7hKPNjSD$PTT@EzcVEzsUF-YOm~nu9r_ABr7gU!Dpr-?A2~)pEoby2gOSd-r=j ze$ZzbfNu(Ue#R_Bb1zTAzry0jo*b9Aih^T-TEyJOn+ep1!`)6uK4pRJknZdvbqw-^ zc8Qb={Q&ky!hcHW1vVZ}NEF#yUq83RWnnEKwu^|tO!70bv1AgYfn}w728RqaV=mZW zcl0KyckL@%_BBP92T1WQRFnw>yt8kI(fw#lJzqV(H~yY>cLVo$@DckS7LokDsqBr1 z=5dJu_xx@$b6{Zk+3c*FfeGH9X4v4(w2%Ed(>}kWsQ!p<{~Y2OUSD*qOPxwHhMz`V zw?$*!+spExBo1(tR05OXw2{e<&FqW!Fq?Z_aH?3YU7?--e8WsbV2zA38ckQFj+>yJ zvN(%eGfN_sKFdMFvUa-4hTb;K+&zyula)nh9_i-X>|skT4;+H$D8cuFb`1r=Q=x6E zeD?w@1@+zQU+S?ZA5!`1V_V`o?5PK}wF)j@ju3n+c(@s_=rYdv!NAZwzkGS))!dbB z{WCpiny9uOohpM%I3bzh@(ae-`nz*S`OM}zFl{~P)NGA+NBJ@Ybo7#h^5JtFB*I$i zUkp(4zynZ;5YkT*7Gi3wpW?&XH|HyO)TWQbmM9L~DdhT_orkpD$`(!G`dx1??Vyn4 z0k}<>E!JekcFg&K_Oi&btEPHc11&r>60FRY18lPu*UhJqMZAT5;2ml{YM3RM*6#}u zS(yC3V3kH%y^GZ3#(fJZ8HFxC!w4ANQ`sqEDL0?qqny(gWPs*N3!wWNq+g}U7;gKd zmvuCbFD@ieW(T%2pyq2#99ADqomcH=jTUSv>g9K+2r#*c_K&^Rp7)a#2>z!VsM9&& zxai+l+GK2t{n$kmeBKGXv4G1A_r0yZgvnfgdiy0Y{JOj*+HKh;69SJR%-X-@#55aj zT@)vPb>>*WG3&2^YqAil1@R^6WnN|$ZFrIHxY!)zDjKKSB$HB5YJ$2>arP)w{e_XQ(@jwG?Y=xfR^*&xRm zapcvztXR$mZ!Tb^s_70;pvRUshAx3%r0EPB3?@VA;oTpN-4l+gz48;fzE8wt2}B&X z8J1p~cSDT1=C%cADQfF=3#hirCd)hX5J43;6CD1E6Ub@aI5u!CyLNFp6NG>NOpvKa zrKS1w`ht|BAo!hq(k8NgdNU{$OL4SC6|VDg`b^{EwixhqGx6-Cs6ov;IE*7P?e zSz4nw2gmB}=_^dq)m_?K=UR3b=)BaCR1WIhF4eOGZ#I?c@XW(|k2pWLse_7nX20J3 z3vbg=dl-|P7l#`6j`Z7=(ydb-puL2e8%+7!b{zcmtY9z=>3pPCWN;YEew^ZN|3IsDBMKBU$Ib^m4?BCmJR0HCIJB!Ka_G=7TA@}3e}d9 zyVo2>i+ZhgBpt?U*;S_XCRw|1SCw}}EQ8IEyxRvwwQdIsoJWVt&a0PyQIbjy?Hp;k zKxn!vv_&wW6ozV1RiKi>vnTfRk9fy%1dUV`>q}*{>$_ihDK>Av+@rOxNZe%7u&)g7 z)izQ}E&Bj&X=x;1_N#1nD&YZem**zE=p7!OdF7rQ)=qr;)yN&E^e=H1{(*NoqE8Ww zUX|to&7T}Ub_e*aCBLBD5w^AFnO8-3P^q%3Wb(9`Q0#qVkF>R!Yebq#60@7--y`S0 zJje<>ynZj7H1*$|L%IHobLju*@cA#-O%9I#m*=Lwf-Ma%PWyP(1iy)%miitdW%a>4 z|FD?zxCP;p^w`mdP+8cKHRDwst#^4-k9nW;eq~ju<}pN_&cBku|5MLC)FHG38{CV> zh7R`^{c{p@!&S9UFdi-j z(c1MCSo4X?{ftkn1wSd%2|;AkdjH1SO;#jTaa+M3yxHHwf$m@uw?FPEdRa_N>odRW zZYGJ${nh(Lu{`=Qg>+eLuANhb>5SKs6F=$e8C9;OoLr7|pI6z<9vO2zlWQNqjU_t{ zSF%I%{P2ukuQQ*Hk~7!JdSl;$PG=1G;@!A&&?+oCsE)p2MW(aL#pE54eL00VD7vRs zp*<^fnobq%g)(WY1P%E|&kfK1`+$^il=KN{gdg22L!nOOm6pI4Al@fq$D|0YMekp% zzri(;pPHVSJ36@hO#5w8Y{z`*xI`)QC_Z$CKG^PngG8lvIk2Y~H*Ff}0F!eHiE4(( zq#ny{l1~a-MwU?3D=uKLlhmd{;H$xUQTNHghWHW^XUnfzLs!OnRB>*{n1c(P@BP+@ zoZmDH&^LDnYP-(6si1+BbPsu)Ce=0)R{7eV{Y@)q`Up=DiaeybP3>KK&P04gtJ?VS 
zgH_~i73(Xhwh6QqP&f8)S7;NGO@%)`p-}wYxE;79rNV6AD0Pm~C++SA+*(&6Ue2Jo z_5BIvc8mNWk1wXQgzm3^l&5VNThjs!Z%HNbVBejSCIm=HcT)CK6Y~qeOBZbLxNKrjbrO!^b4mo&u^{az19kP?k zik#2FLOk33W%yY~+epg$-gZUWnz5!hB?KRGC*nHHW`^L8+T+b1S$n#rJz&N^<8aVB zMrRZ(l-uGczS5b5x|rG)&%sr&_s{hB)3b^-`z7qMAxDK0^w% zxrE`J;r#T}GoByIsD>&7r6FIVFwYO@c6;%4Q>c=pU%Pr>#)6Wh;crrj;eJT>R|(U_SG=oIGG|k^gT9!~bx6H#b8-q?7KukHS#Iz=-4&i*wrY z!b$H%mE`;yG*B&q!$^lp<&_+yHS|$)n<;RCgh_1k`#1$Ii(d6AJ_bSnIV$&~fB%1a f`cwSh{{RnsQ)gFCXRrktCkGE2Ev=M_G}`|G;RX>D literal 0 HcmV?d00001 diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java new file mode 100644 index 000000000000..430b46efaadc --- /dev/null +++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java @@ -0,0 +1,591 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.chaos; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.util.RetryCounterFactory; +import org.apache.hadoop.util.Shell; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.AsyncCallback; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/*** + * An agent for executing destructive actions for ChaosMonkey. + * Uses ZooKeeper Watchers and LocalShell, to do the killing + * and getting status of service on targeted host without SSH. + * uses given ZNode Structure: + * /perfChaosTest (root) + * | + * | + * /chaosAgents (Used for registration has + * hostname ephemeral nodes as children) + * | + * | + * /chaosAgentTaskStatus (Used for task + * Execution, has hostname persistent + * nodes as child with tasks as their children) + * | + * | + * /hostname + * | + * | + * /task0000001 (command as data) + * (has two types of command : + * 1: starts with "exec" + * for executing a destructive action. + * 2: starts with "bool" for getting + * only status of service. 
+ *
+ */
+@InterfaceAudience.Private
+public class ChaosAgent implements Watcher, Closeable, Runnable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ChaosAgent.class);
+  static AtomicBoolean stopChaosAgent = new AtomicBoolean();
+  private ZooKeeper zk;
+  private String quorum;
+  private String agentName;
+  private Configuration conf;
+  private RetryCounterFactory retryCounterFactory;
+  private volatile boolean connected = false;
+
+  public ChaosAgent(Configuration conf, String quorum, String agentName) {
+    initChaosAgent(conf, quorum, agentName);
+  }
+
+  /**
+   * Sets global parameters, initiates the connection with ZooKeeper and then
+   * registers the agent.
+   * @param conf initial configuration to use
+   * @param quorum ZK quorum
+   * @param agentName agent name to use
+   */
+  private void initChaosAgent(Configuration conf, String quorum, String agentName) {
+    this.conf = conf;
+    this.quorum = quorum;
+    this.agentName = agentName;
+    this.retryCounterFactory = new RetryCounterFactory(new RetryCounter.RetryConfig()
+      .setMaxAttempts(conf.getInt(ChaosConstants.RETRY_ATTEMPTS_KEY,
+        ChaosConstants.DEFAULT_RETRY_ATTEMPTS)).setSleepInterval(
+          conf.getLong(ChaosConstants.RETRY_SLEEP_INTERVAL_KEY,
+            ChaosConstants.DEFAULT_RETRY_SLEEP_INTERVAL)));
+    try {
+      this.createZKConnection(null);
+      this.register();
+    } catch (IOException e) {
+      LOG.error("Error creating ZooKeeper connection", e);
+    }
+  }
+
+  /**
+   * Creates a connection with ZooKeeper.
+   * @throws IOException if something goes wrong
+   */
+  private void createZKConnection(Watcher watcher) throws IOException {
+    if (watcher == null) {
+      zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, this);
+    } else {
+      zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, watcher);
+    }
+    LOG.info("ZooKeeper connection created for ChaosAgent: " + agentName);
+  }
+
+  //WATCHERS: Below are the Watchers used by ChaosAgent
+
+  /**
+   * Watcher that notifies the agent when a task is assigned to it, by
+   * watching for nodes being added to the agent's ZNode as children.
+   */
+  Watcher newTaskCreatedWatcher = new Watcher() {
+    @Override
+    public void process(WatchedEvent watchedEvent) {
+      if (watchedEvent.getType() == Event.EventType.NodeChildrenChanged) {
+        if (!(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE +
+          ChaosConstants.ZNODE_PATH_SEPARATOR + agentName).equals(watchedEvent.getPath())) {
+          throw new RuntimeException(KeeperException.create(
+            KeeperException.Code.DATAINCONSISTENCY));
+        }
+
+        LOG.info("Change in tasks node, checking for tasks again.");
+        getTasks();
+      }
+    }
+  };
+
+  //CALLBACKS: Below are the Callbacks used by ChaosAgent
+
+  /**
+   * Callback used while setting the status of a given task; logs the given status.
+   */
+  AsyncCallback.StatCallback setStatusOfTaskZNodeCallback = (rc, path, ctx, stat) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        // Connection to the server was lost while setting status; setting again.
+        try {
+          recreateZKConnection();
+        } catch (Exception e) {
+          break;
+        }
+        setStatusOfTaskZNode(path, (String) ctx);
+        break;
+
+      case OK:
+        LOG.info("Status of task has been set");
+        break;
+
+      case NONODE:
+        LOG.error("Chaos Agent status node does not exist: " +
+          "check the ZNode directory structure again.");
+        break;
+
+      default:
+        LOG.error("Error while setting status of task ZNode: " +
+          path, KeeperException.create(KeeperException.Code.get(rc), path));
+    }
+  };
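The callbacks above all follow the same asynchronous ZooKeeper idiom: on CONNECTIONLOSS the operation is simply re-issued with the context object it was originally given, otherwise the result code is logged. A minimal, self-contained sketch of that idiom (the class and method names here are hypothetical, for illustration only, not part of the patch):

// Sketch of the CONNECTIONLOSS retry idiom used by the callbacks above.
// Hypothetical names; assumes an already-constructed ZooKeeper handle.
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class RetryingSetData {
  private final ZooKeeper zk;

  public RetryingSetData(ZooKeeper zk) {
    this.zk = zk;
  }

  // Re-issues the write whenever the async result code reports CONNECTIONLOSS.
  // ZooKeeper invokes the callback on its event thread, so the retry is async too.
  public void setDataWithRetry(String path, byte[] data) {
    zk.setData(path, data, -1, (rc, p, ctx, stat) -> {
      switch (KeeperException.Code.get(rc)) {
        case CONNECTIONLOSS:
          setDataWithRetry(p, (byte[]) ctx); // retry with the payload carried in ctx
          break;
        case OK:
          break;
        default:
          // Give up on unexpected errors; a real agent would log and surface this.
          System.err.println("setData failed: " + KeeperException.Code.get(rc) + " " + p);
      }
    }, data);
  }
}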
+
+  /**
+   * Callback used while creating a persistent ZNode; tries to create the
+   * ZNode again if the connection was lost on the previous try.
+   */
+  AsyncCallback.StringCallback createZNodeCallback = (rc, path, ctx, name) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        try {
+          recreateZKConnection();
+        } catch (Exception e) {
+          break;
+        }
+        createZNode(path, (byte[]) ctx);
+        break;
+      case OK:
+        LOG.info("ZNode created : " + path);
+        break;
+      case NODEEXISTS:
+        LOG.warn("ZNode already registered: " + path);
+        break;
+      default:
+        LOG.error("Error occurred while creating persistent ZNode: " + path,
+          KeeperException.create(KeeperException.Code.get(rc), path));
+    }
+  };
+
+  /**
+   * Callback used while creating an ephemeral ZNode; tries to create the
+   * ZNode again if the connection was lost on the previous try.
+   */
+  AsyncCallback.StringCallback createEphemeralZNodeCallback = (rc, path, ctx, name) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        try {
+          recreateZKConnection();
+        } catch (Exception e) {
+          break;
+        }
+        createEphemeralZNode(path, (byte[]) ctx);
+        break;
+      case OK:
+        LOG.info("ZNode created : " + path);
+        break;
+      case NODEEXISTS:
+        LOG.warn("ZNode already registered: " + path);
+        break;
+      default:
+        LOG.error("Error occurred while creating ephemeral ZNode: ",
+          KeeperException.create(KeeperException.Code.get(rc), path));
+    }
+  };
+
+  /**
+   * Callback used by getTasksForAgentCallback while fetching a command; after
+   * fetching the command successfully, it executes the command and sets the
+   * task status according to the command type.
+   */
+  AsyncCallback.DataCallback getTaskForExecutionCallback = new AsyncCallback.DataCallback() {
+    @Override
+    public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
+      switch (KeeperException.Code.get(rc)) {
+        case CONNECTIONLOSS:
+          // Connection to the server has been lost while getting the task; getting data again.
+          try {
+            recreateZKConnection();
+          } catch (Exception e) {
+            break;
+          }
+          zk.getData(path,
+            false,
+            getTaskForExecutionCallback,
+            ctx);
+          break;
+        case OK:
+          String cmd = new String(data);
+          LOG.info("Executing command : " + cmd);
+          String status = ChaosConstants.TASK_COMPLETION_STRING;
+          try {
+            String user = conf.get(ChaosConstants.CHAOSAGENT_SHELL_USER,
+              ChaosConstants.DEFAULT_SHELL_USER);
+            switch (cmd.substring(0, 4)) {
+              case "bool":
+                String ret = execWithRetries(user, cmd.substring(4)).getSecond();
+                status = Boolean.toString(ret.length() > 0);
+                break;
+
+              case "exec":
+                execWithRetries(user, cmd.substring(4));
+                break;
+
+              default:
+                LOG.error("Unknown command type");
+                status = ChaosConstants.TASK_ERROR_STRING;
+            }
+          } catch (IOException e) {
+            LOG.error("Got error while executing command: " + cmd +
+              " on agent: " + agentName, e);
+            status = ChaosConstants.TASK_ERROR_STRING;
+          }
+
+          try {
+            setStatusOfTaskZNode(path, status);
+            Thread.sleep(ChaosConstants.SET_STATUS_SLEEP_TIME);
+          } catch (InterruptedException e) {
+            LOG.error("Error occurred after setting status: " + e);
+          }
+          // Do not fall through into the error branch once the task has run.
+          break;
+
+        default:
+          LOG.error("Error occurred while getting data",
+            KeeperException.create(KeeperException.Code.get(rc), path));
+      }
+    }
+  };
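The command protocol above is positional: the first four characters of the task payload select the command type, and everything after them is the shell command itself. A small sketch of a producer-side helper that builds such payloads (hypothetical names; the patch itself builds these payloads in the ZNodeClusterManager further below):

// Sketch of a helper that builds the 4-character-prefixed payloads that
// getTaskForExecutionCallback parses with cmd.substring(0, 4). Hypothetical.
import java.nio.charset.StandardCharsets;

public final class TaskPayloads {
  private TaskPayloads() {
  }

  // A destructive action: the agent runs it and reports "done" or "error".
  public static byte[] exec(String shellCommand) {
    return ("exec" + shellCommand).getBytes(StandardCharsets.UTF_8);
  }

  // A status probe: the agent runs it and reports "true" or "false",
  // depending on whether the command produced any output.
  public static byte[] bool(String shellCommand) {
    return ("bool" + shellCommand).getBytes(StandardCharsets.UTF_8);
  }
}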
+
+  /**
+   * Callback used while getting tasks for the agent; if the call executes
+   * without error, it creates a separate thread for each child task so that
+   * the given tasks can execute in parallel.
+   */
+  AsyncCallback.ChildrenCallback getTasksForAgentCallback = new AsyncCallback.ChildrenCallback() {
+    @Override
+    public void processResult(int rc, String path, Object ctx, List<String> children) {
+      switch (KeeperException.Code.get(rc)) {
+        case CONNECTIONLOSS: {
+          // Connection to the server has been lost; getting tasks again.
+          try {
+            recreateZKConnection();
+          } catch (Exception e) {
+            break;
+          }
+          getTasks();
+          break;
+        }
+
+        case OK: {
+          if (children != null) {
+            try {
+              LOG.info("Executing each task as a separate thread");
+              List<Thread> tasksList = new ArrayList<>();
+              for (String task : children) {
+                String threadName = agentName + "_" + task;
+                Thread t = new Thread(() -> {
+                  LOG.info("Executing task : " + task + " of agent : " + agentName);
+                  zk.getData(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE +
+                    ChaosConstants.ZNODE_PATH_SEPARATOR + agentName +
+                    ChaosConstants.ZNODE_PATH_SEPARATOR + task,
+                    false,
+                    getTaskForExecutionCallback,
+                    task);
+                });
+                t.setName(threadName);
+                t.start();
+                tasksList.add(t);
+              }
+
+              // Join after all threads have been started, so the tasks
+              // actually run in parallel.
+              for (Thread thread : tasksList) {
+                thread.join();
+              }
+            } catch (InterruptedException e) {
+              LOG.error("Error scheduling next task for agent : " + agentName, e);
+            }
+          }
+          break;
+        }
+
+        default:
+          LOG.error("Error occurred while getting task",
+            KeeperException.create(KeeperException.Code.get(rc), path));
+      }
+    }
+  };
+
+  /**
+   * Creates a PERSISTENT ZNode with the given path and data.
+   * @param path Path at which to create the ZNode
+   * @param data Data to put under the ZNode
+   */
+  public void createZNode(String path, byte[] data) {
+    zk.create(path,
+      data,
+      ZooDefs.Ids.OPEN_ACL_UNSAFE,
+      CreateMode.PERSISTENT,
+      createZNodeCallback,
+      data);
+  }
+
+  /**
+   * Creates an EPHEMERAL ZNode with the given path and data.
+   * @param path Path at which to create the ephemeral ZNode
+   * @param data Data to put under the ZNode
+   */
+  public void createEphemeralZNode(String path, byte[] data) {
+    zk.create(path,
+      data,
+      ZooDefs.Ids.OPEN_ACL_UNSAFE,
+      CreateMode.EPHEMERAL,
+      createEphemeralZNodeCallback,
+      data);
+  }
+
+  /**
+   * Checks if the given ZNode exists; if not, creates a PERSISTENT ZNode for it.
+   *
+   * @param path Path to check for the ZNode
+   */
+  private void createIfZNodeNotExists(String path) {
+    try {
+      if (zk.exists(path,
+        false) == null) {
+        createZNode(path, new byte[0]);
+      }
+    } catch (KeeperException | InterruptedException e) {
+      LOG.error("Error checking given node : " + path, e);
+    }
+  }
+
+  /**
+   * Sets the given status for the task ZNode.
+   *
+   * @param taskZNode ZNode to set the status on
+   * @param status Status value
+   */
+  public void setStatusOfTaskZNode(String taskZNode, String status) {
+    LOG.info("Setting status of task ZNode: " + taskZNode + " status : " + status);
+    zk.setData(taskZNode,
+      status.getBytes(),
+      -1,
+      setStatusOfTaskZNodeCallback,
+      null);
+  }
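A thread per task plus an explicit join works, but it leaves the degree of parallelism unbounded. For comparison, a sketch of the same fan-out/join expressed with an ExecutorService, which caps the number of concurrently running tasks (hypothetical names, illustration only):

// Sketch: bounded fan-out/join with an ExecutorService instead of raw threads.
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class TaskFanOut {
  public void runAll(List<Runnable> tasks) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(4); // bound the parallelism
    try {
      tasks.forEach(pool::submit);
    } finally {
      pool.shutdown(); // stop accepting work, let submitted tasks finish
      pool.awaitTermination(5, TimeUnit.MINUTES); // analogous to joining each thread
    }
  }
}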
+
+  /**
+   * Registers the ChaosAgent by checking for and creating the necessary ZNodes.
+   */
+  private void register() {
+    createIfZNodeNotExists(ChaosConstants.CHAOS_TEST_ROOT_ZNODE);
+    createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPHEMERAL_ZNODE);
+    createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE);
+    createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE +
+      ChaosConstants.ZNODE_PATH_SEPARATOR + agentName);
+
+    createEphemeralZNode(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPHEMERAL_ZNODE +
+      ChaosConstants.ZNODE_PATH_SEPARATOR + agentName, new byte[0]);
+  }
+
+  /**
+   * Gets tasks for execution: sets a watch on the agent's host ZNode and
+   * waits for tasks to be assigned; getTasksForAgentCallback handles the
+   * execution of the tasks.
+   */
+  private void getTasks() {
+    LOG.info("Getting tasks for agent: " + agentName + " and setting watch for new tasks");
+    zk.getChildren(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE +
+      ChaosConstants.ZNODE_PATH_SEPARATOR + agentName,
+      newTaskCreatedWatcher,
+      getTasksForAgentCallback,
+      null);
+  }
+
+  /**
+   * Executes the given command as the given user, with retries.
+   * Uses LocalShell to execute the command.
+   *
+   * @param user user name, default none
+   * @param cmd  Command to execute
+   * @return A pair of exit code and shell output
+   * @throws IOException Exception while executing the shell command
+   */
+  private Pair<Integer, String> execWithRetries(String user, String cmd) throws IOException {
+    RetryCounter retryCounter = retryCounterFactory.create();
+    while (true) {
+      try {
+        return exec(user, cmd);
+      } catch (IOException e) {
+        retryOrThrow(retryCounter, e, user, cmd);
+      }
+      try {
+        retryCounter.sleepUntilNextRetry();
+      } catch (InterruptedException e) {
+        LOG.warn("Sleep interrupted: " + e);
+      }
+    }
+  }
+
+  private Pair<Integer, String> exec(String user, String cmd) throws IOException {
+    LOG.info("Executing shell command: " + cmd + ", user: " + user);
+
+    LocalShell shell = new LocalShell(user, cmd);
+    try {
+      shell.execute();
+    } catch (Shell.ExitCodeException e) {
+      String output = shell.getOutput();
+      throw new Shell.ExitCodeException(e.getExitCode(), "stderr: " + e.getMessage() +
+        ", stdout: " + output);
+    }
+    LOG.info("Executed shell command, exit code: {}, output:\n{}",
+      shell.getExitCode(), shell.getOutput());
+
+    return new Pair<>(shell.getExitCode(), shell.getOutput());
+  }
+
+  private <E extends Exception> void retryOrThrow(RetryCounter retryCounter, E ex,
+    String user, String cmd) throws E {
+    if (retryCounter.shouldRetry()) {
+      LOG.warn("Local command: {}, user: {}, failed at attempt {}. Retrying until maxAttempts: {}."
+        + " Exception: {}", cmd, user, retryCounter.getAttemptTimes(),
+        retryCounter.getMaxAttempts(), ex.getMessage());
+      return;
+    }
+    throw ex;
+  }
+
+  private boolean isConnected() {
+    return connected;
+  }
+
+  @Override
+  public void close() throws IOException {
+    LOG.info("Closing ZooKeeper connection for Chaos Agent : " + agentName);
+    try {
+      zk.close();
+    } catch (InterruptedException e) {
+      LOG.error("Error while closing ZooKeeper connection.");
+    }
+  }
+
+  @Override
+  public void run() {
+    try {
+      LOG.info("Running Chaos Agent on : " + agentName);
+      while (!this.isConnected()) {
+        Thread.sleep(100);
+      }
+      this.getTasks();
+      while (!stopChaosAgent.get()) {
+        Thread.sleep(500);
+      }
+    } catch (InterruptedException e) {
+      LOG.error("Error while running Chaos Agent", e);
+    }
+  }
+
+  @Override
+  public void process(WatchedEvent watchedEvent) {
+    LOG.info("Processing event: " + watchedEvent.toString());
+    if (watchedEvent.getType() == Event.EventType.None) {
+      switch (watchedEvent.getState()) {
+        case SyncConnected:
+          connected = true;
+          break;
+        case Disconnected:
+          connected = false;
+          break;
+        case Expired:
+          connected = false;
+          LOG.error("Session expired, creating the connection again");
+          try {
+            createZKConnection(null);
+          } catch (IOException e) {
+            LOG.error("Error creating ZooKeeper connection", e);
+          }
+          // Do not fall through into the unknown-state branch.
+          break;
+        default:
+          LOG.error("Unknown state");
+          break;
+      }
+    }
+  }
+
+  private void recreateZKConnection() throws Exception {
+    try {
+      zk.close();
+      createZKConnection(newTaskCreatedWatcher);
+      createEphemeralZNode(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPHEMERAL_ZNODE +
+        ChaosConstants.ZNODE_PATH_SEPARATOR + agentName, new byte[0]);
+    } catch (IOException e) {
+      LOG.error("Error creating new ZK connection for agent: {}", agentName, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Executes a command locally.
+   */
+  protected static class LocalShell extends Shell.ShellCommandExecutor {
+
+    private String user;
+    private String execCommand;
+
+    public LocalShell(String user, String execCommand) {
+      super(new String[]{execCommand});
+      this.user = user;
+      this.execCommand = execCommand;
+    }
+
+    @Override
+    public String[] getExecString() {
+      // TODO: Considering the Agent is running as the same user.
+      if (!user.equals(ChaosConstants.DEFAULT_SHELL_USER)) {
+        execCommand = String.format("su -u %1$s %2$s", user, execCommand);
+      }
+      return new String[]{"/usr/bin/env", "bash", "-c", execCommand};
+    }
+
+    @Override
+    public void execute() throws IOException {
+      super.execute();
+    }
+  }
+}
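LocalShell builds on Hadoop's Shell.ShellCommandExecutor. For reference, a minimal sketch of driving that executor directly, assuming only that bash is available at the usual place:

// Sketch: running a trivial command through Hadoop's ShellCommandExecutor,
// which LocalShell above extends, and reading its exit code and output.
import java.io.IOException;
import org.apache.hadoop.util.Shell;

public class ShellDemo {
  public static void main(String[] args) throws IOException {
    Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
      new String[] { "/usr/bin/env", "bash", "-c", "echo hello" });
    shexec.execute(); // throws Shell.ExitCodeException on a non-zero exit code
    System.out.println("exit code: " + shexec.getExitCode());
    System.out.println("output: " + shexec.getOutput());
  }
}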
diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java
new file mode 100644
index 000000000000..54fbe9b10cde
--- /dev/null
+++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.chaos;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * ChaosConstants holds a bunch of Chaos-related constants.
+ */
+@InterfaceAudience.Public
+public final class ChaosConstants {
+
+  /*Base ZNode for the whole Chaos Testing*/
+  public static final String CHAOS_TEST_ROOT_ZNODE = "/hbase";
+
+  /*Just a / used as the path separator*/
+  public static final String ZNODE_PATH_SEPARATOR = "/";
+
+  /*ZNode used for ChaosAgents registration.*/
+  public static final String CHAOS_AGENT_REGISTRATION_EPHEMERAL_ZNODE =
+    CHAOS_TEST_ROOT_ZNODE + ZNODE_PATH_SEPARATOR + "chaosAgents";
+
+  /*ZNode used for getting the status of assigned tasks*/
+  public static final String CHAOS_AGENT_STATUS_PERSISTENT_ZNODE =
+    CHAOS_TEST_ROOT_ZNODE + ZNODE_PATH_SEPARATOR + "chaosAgentTaskStatus";
+
+  /*Config property for the number of retries when executing a command*/
+  public static final String RETRY_ATTEMPTS_KEY = "hbase.it.clustermanager.retry.attempts";
+
+  /*Default value for the number of retries*/
+  public static final int DEFAULT_RETRY_ATTEMPTS = 5;
+
+  /*Config property for the sleep time in between retries*/
+  public static final String RETRY_SLEEP_INTERVAL_KEY =
+    "hbase.it.clustermanager.retry.sleep.interval";
+
+  /*Default sleep time between each retry*/
+  public static final int DEFAULT_RETRY_SLEEP_INTERVAL = 5000;
+
+  /*Config property for executing a command as a specific user*/
+  public static final String CHAOSAGENT_SHELL_USER = "hbase.it.clustermanager.ssh.user";
+
+  /*Default user for executing local commands*/
+  public static final String DEFAULT_SHELL_USER = "";
+
+  /*Timeout used while creating the ZooKeeper connection*/
+  public static final int SESSION_TIMEOUT_ZK = 60000 * 10;
+
+  /*Time given to the ChaosAgent to set the status*/
+  public static final int SET_STATUS_SLEEP_TIME = 30 * 1000;
+
+  /*Status String when an ERROR occurred while executing the task*/
+  public static final String TASK_ERROR_STRING = "error";
+
+  /*Status String when the command executed correctly*/
+  public static final String TASK_COMPLETION_STRING = "done";
+
+  /*Name of the ChoreService to use*/
+  public static final String CHORE_SERVICE_PREFIX = "ChaosService";
+
+}
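The retry knobs above are plain Configuration properties, so they can be overridden before the agent is constructed. A short sketch (the values are chosen arbitrarily for illustration):

// Sketch: overriding the retry knobs through a Hadoop Configuration before
// handing it to the agent. Values here are arbitrary.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.chaos.ChaosConstants;

public class ChaosConfigDemo {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt(ChaosConstants.RETRY_ATTEMPTS_KEY, 3); // default is 5
    conf.setLong(ChaosConstants.RETRY_SLEEP_INTERVAL_KEY, 10_000L); // default is 5000 ms
    return conf;
  }
}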
diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java
new file mode 100644
index 000000000000..e2abe3d42655
--- /dev/null
+++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.chaos;
+
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
+
+/**
+ * Class used to start/stop chaos-related services (currently the ChaosAgent).
+ */
+@InterfaceAudience.Private
+public class ChaosService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ChaosService.class.getName());
+
+  public static void execute(String[] args, Configuration conf) {
+    LOG.info("arguments : " + Arrays.toString(args));
+
+    try {
+      CommandLine cmdline = new GnuParser().parse(getOptions(), args);
+      if (cmdline.hasOption(ChaosServiceName.CHAOSAGENT.toString().toLowerCase())) {
+        String actionStr =
+          cmdline.getOptionValue(ChaosServiceName.CHAOSAGENT.toString().toLowerCase());
+        try {
+          ExecutorAction action = ExecutorAction.valueOf(actionStr.toUpperCase());
+          if (action == ExecutorAction.START) {
+            chaosServiceStart(conf, ChaosServiceName.CHAOSAGENT);
+          } else if (action == ExecutorAction.STOP) {
+            chaosServiceStop();
+          }
+        } catch (IllegalArgumentException e) {
+          LOG.error("action passed: {}. Unexpected action. Please provide only start/stop.",
+            actionStr, e);
+          throw new RuntimeException(e);
+        }
+      } else {
+        LOG.error("Invalid options");
+      }
+    } catch (Exception e) {
+      LOG.error("Error while starting ChaosService : ", e);
+    }
+  }
+
+  private static void chaosServiceStart(Configuration conf, ChaosServiceName serviceName) {
+    switch (serviceName) {
+      case CHAOSAGENT:
+        ChaosAgent.stopChaosAgent.set(false);
+        try {
+          Thread t = new Thread(new ChaosAgent(conf,
+            ChaosUtils.getZKQuorum(conf), ChaosUtils.getHostName()));
+          t.start();
+          t.join();
+        } catch (InterruptedException | UnknownHostException e) {
+          LOG.error("Failed while running ChaosAgent for service : {}", serviceName, e);
+        }
+        break;
+      default:
+        LOG.error("Service name not known : " + serviceName.toString());
+    }
+  }
+
+  private static void chaosServiceStop() {
+    ChaosAgent.stopChaosAgent.set(true);
+  }
+
+  private static Options getOptions() {
+    Options options = new Options();
+    options.addOption(new Option("c", ChaosServiceName.CHAOSAGENT.toString().toLowerCase(),
+      true, "expecting a start/stop argument"));
+    options.addOption(new Option("D", ChaosServiceName.GENERIC.toString(),
+      true, "generic D param"));
+    LOG.info(Arrays.toString(new Collection[] { options.getOptions() }));
+    return options;
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    new GenericOptionsParser(conf, args);
+
+    ChoreService choreChaosService = null;
+    ScheduledChore authChore = AuthUtil.getAuthChore(conf);
+
+    try {
+      if (authChore != null) {
+        choreChaosService = new ChoreService(ChaosConstants.CHORE_SERVICE_PREFIX);
+        choreChaosService.scheduleChore(authChore);
+      }
+
+      execute(args, conf);
+    } finally {
+      if (authChore != null) {
+        choreChaosService.shutdown();
+      }
+    }
+  }
+
+  enum ChaosServiceName {
+    CHAOSAGENT,
+    GENERIC
+  }
+
+  enum ExecutorAction {
+    START,
+    STOP
+  }
+}
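ChaosService is normally driven from the command line, but the same entry point can be exercised programmatically. A sketch of starting the agent in-process with the arguments the CLI would pass; note that execute() blocks until the agent is stopped:

// Sketch: starting the chaos agent in-process via ChaosService.execute().
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.chaos.ChaosService;

public class StartAgentDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "-c start" selects the chaosagent option with the start action; this
    // call blocks until ChaosAgent.stopChaosAgent is set to true elsewhere.
    ChaosService.execute(new String[] { "-c", "start" }, conf);
  }
}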
diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java
new file mode 100644
index 000000000000..da42021bcafb
--- /dev/null
+++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.chaos;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * ChaosUtils holds a bunch of useful functions, like getting the hostname
+ * and getting the ZooKeeper quorum.
+ */
+@InterfaceAudience.Private
+public class ChaosUtils {
+
+  public static String getHostName() throws UnknownHostException {
+    return InetAddress.getLocalHost().getHostName();
+  }
+
+  public static String getZKQuorum(Configuration conf) {
+    String port =
+      Integer.toString(conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181));
+    String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, "localhost");
+    for (int i = 0; i < serverHosts.length; i++) {
+      serverHosts[i] = serverHosts[i] + ":" + port;
+    }
+    return String.join(",", serverHosts);
+  }
+
+}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java
new file mode 100644
index 000000000000..31fb9e3ca604
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java
@@ -0,0 +1,332 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.zookeeper.AsyncCallback;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class ChaosZKClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ChaosZKClient.class.getName());
+  private static final String CHAOS_AGENT_PARENT_ZNODE = "/hbase/chaosAgents";
+  private static final String CHAOS_AGENT_STATUS_ZNODE = "/hbase/chaosAgentTaskStatus";
+  private static final String ZNODE_PATH_SEPARATOR = "/";
+  private static final String TASK_PREFIX = "task_";
+  private static final String TASK_ERROR_STRING = "error";
+  private static final String TASK_COMPLETION_STRING = "done";
+  private static final String TASK_BOOLEAN_TRUE = "true";
+  private static final String TASK_BOOLEAN_FALSE = "false";
+  private static final String CONNECTION_LOSS = "ConnectionLoss";
+  private static final int SESSION_TIMEOUT_ZK = 10 * 60 * 1000;
+  private static final int TASK_EXECUTION_TIMEOUT = 5 * 60 * 1000;
+  private volatile String taskStatus = null;
+
+  private final String quorum;
+  private ZooKeeper zk;
+
+  public ChaosZKClient(String quorum) {
+    this.quorum = quorum;
+    try {
+      this.createNewZKConnection();
+    } catch (IOException e) {
+      LOG.error("Error creating ZooKeeper connection: ", e);
+    }
+  }
+
+  /**
+   * Creates a connection with ZooKeeper.
+   * @throws IOException when not able to create the connection properly
+   */
+  public void createNewZKConnection() throws IOException {
+    Watcher watcher = new Watcher() {
+      @Override
+      public void process(WatchedEvent watchedEvent) {
+        LOG.info("Created ZooKeeper connection for executing task");
+      }
+    };
+
+    this.zk = new ZooKeeper(quorum, SESSION_TIMEOUT_ZK, watcher);
+  }
+
+  /**
+   * Checks whether the ChaosAgent is running on the target host by checking its ZNode.
+   * @param hostname hostname to check for the chaosagent
+   * @return true/false whether the agent is running or not
+   */
+  private boolean isChaosAgentRunning(String hostname) {
+    try {
+      return zk.exists(CHAOS_AGENT_PARENT_ZNODE + ZNODE_PATH_SEPARATOR + hostname,
+        false) != null;
+    } catch (KeeperException e) {
+      if (e.toString().contains(CONNECTION_LOSS)) {
+        recreateZKConnection();
+        try {
+          return zk.exists(CHAOS_AGENT_PARENT_ZNODE + ZNODE_PATH_SEPARATOR + hostname,
+            false) != null;
+        } catch (KeeperException | InterruptedException ie) {
+          LOG.error("ERROR ", ie);
+        }
+      }
+    } catch (InterruptedException e) {
+      LOG.error("Error checking for given hostname: {} ERROR: ", hostname, e);
+    }
+    return false;
+  }
+
+  /**
+   * Creates a task for the target host by creating a ZNode.
+   * Waits a limited amount of time for the task to complete.
+   * @param taskObject Object whose data represents the command
+   * @return the resulting status
+   */
+  public String submitTask(final TaskObject taskObject) {
+    if (isChaosAgentRunning(taskObject.getTaskHostname())) {
+      LOG.info("Creating task node");
+      zk.create(CHAOS_AGENT_STATUS_ZNODE + ZNODE_PATH_SEPARATOR +
+        taskObject.getTaskHostname() + ZNODE_PATH_SEPARATOR + TASK_PREFIX,
+        taskObject.getCommand().getBytes(),
+        ZooDefs.Ids.OPEN_ACL_UNSAFE,
+        CreateMode.EPHEMERAL_SEQUENTIAL,
+        submitTaskCallback,
+        taskObject);
+      long start = System.currentTimeMillis();
+
+      while ((System.currentTimeMillis() - start) < TASK_EXECUTION_TIMEOUT) {
+        if (taskStatus != null) {
+          return taskStatus;
+        }
+        Threads.sleep(500);
+      }
+    } else {
+      LOG.info("ChaosAgent is not running on host: {}", taskObject.getTaskHostname());
+    }
+    return TASK_ERROR_STRING;
+  }
+
+  /**
+   * Gets the status of a submitted task.
+   * @param path path at which to get the status
+   * @param ctx  path context
+   */
+  private void getStatus(String path, Object ctx) {
+    LOG.info("Getting status of task: " + path);
+    zk.getData(path,
+      false,
+      getStatusCallback,
+      ctx);
+  }
+
+  /**
+   * Sets a watch on a submitted task.
+   * @param name ZNode name to set a watch on
+   * @param taskObject context for the ZNode name
+   */
+  private void setStatusWatch(String name, TaskObject taskObject) {
+    LOG.info("Checking for ZNode and setting watch for task : " + name);
+    zk.exists(name,
+      setStatusWatcher,
+      setStatusWatchCallback,
+      taskObject);
+  }
+
+  /**
+   * Deletes the task after getting its status.
+   * @param path path of the ZNode to delete
+   */
+  private void deleteTask(String path) {
+    LOG.info("Deleting task: " + path);
+    zk.delete(path,
+      -1,
+      taskDeleteCallback,
+      null);
+  }
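A sketch of how a caller might drive this test-scoped client end to end: submit a single "exec" task to an agent and print the resulting status. The quorum string and hostname are made up for illustration:

// Sketch: submitting one destructive "exec" task through ChaosZKClient.
// submitTask() blocks until a status arrives or the 5-minute timeout expires.
import org.apache.hadoop.hbase.ChaosZKClient;

public class SubmitTaskDemo {
  public static void main(String[] args) {
    ChaosZKClient client = new ChaosZKClient("zk-1.example.com:2181");
    // "exec"-prefixed payloads are destructive actions; the agent answers
    // with "done" on success or "error" on failure (see ChaosAgent above).
    String status = client.submitTask(
      new ChaosZKClient.TaskObject("exec" + "echo hello from chaos", "rs-1.example.com"));
    System.out.println("task status: " + status);
  }
}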
+
+  //WATCHERS:
+
+  /**
+   * Watcher used to get a notification whenever the status of a task changes.
+   */
+  Watcher setStatusWatcher = new Watcher() {
+    @Override
+    public void process(WatchedEvent watchedEvent) {
+      LOG.info("Setting status watch for task: " + watchedEvent.getPath());
+      if (watchedEvent.getType() == Event.EventType.NodeDataChanged) {
+        if (!watchedEvent.getPath().contains(TASK_PREFIX)) {
+          throw new RuntimeException(KeeperException.create(
+            KeeperException.Code.DATAINCONSISTENCY));
+        }
+        getStatus(watchedEvent.getPath(), watchedEvent.getPath());
+      }
+    }
+  };
+
+  //CALLBACKS
+
+  AsyncCallback.DataCallback getStatusCallback = (rc, path, ctx, data, stat) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        // Connection loss while getting the status of the task; getting it again.
+        recreateZKConnection();
+        getStatus(path, ctx);
+        break;
+
+      case OK:
+        if (ctx != null) {
+          String status = new String(data);
+          taskStatus = status;
+          switch (status) {
+            case TASK_COMPLETION_STRING:
+            case TASK_BOOLEAN_TRUE:
+            case TASK_BOOLEAN_FALSE:
+              LOG.info("Task executed completely : Status --> " + status);
+              break;
+
+            case TASK_ERROR_STRING:
+              LOG.info("There was an error while executing the task : Status --> " + status);
+              break;
+
+            default:
+              LOG.warn("Status of the task is undefined! : Status --> " + status);
+          }
+
+          deleteTask(path);
+        }
+        break;
+
+      default:
+        LOG.error("Error while getting status of task: " + path,
+          KeeperException.create(KeeperException.Code.get(rc)));
+    }
+  };
+
+  AsyncCallback.StatCallback setStatusWatchCallback = (rc, path, ctx, stat) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        // Connection loss while setting the watch on the status ZNode; setting it again.
+        recreateZKConnection();
+        setStatusWatch(path, (TaskObject) ctx);
+        break;
+
+      case OK:
+        if (stat != null) {
+          getStatus(path, null);
+        }
+        break;
+
+      default:
+        LOG.error("Error while setting watch on task ZNode: " + path,
+          KeeperException.create(KeeperException.Code.get(rc)));
+    }
+  };
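submitTask() above busy-waits on the volatile taskStatus field that getStatusCallback eventually fills in. A CountDownLatch that the callback counts down is a common alternative that avoids the polling loop; a sketch with hypothetical names:

// Sketch: replacing the poll/sleep loop with a latch the callback releases.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class StatusWait {
  private final CountDownLatch done = new CountDownLatch(1);
  private volatile String status;

  // Would be invoked from the ZooKeeper event thread (e.g. a status callback).
  public void complete(String s) {
    status = s;
    done.countDown();
  }

  // Would replace the while/sleep loop: blocks until a status arrives or times out.
  public String await(long timeoutMillis) throws InterruptedException {
    return done.await(timeoutMillis, TimeUnit.MILLISECONDS) ? status : "error";
  }
}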
+
+  AsyncCallback.StringCallback submitTaskCallback = (rc, path, ctx, name) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        // Connection to the server was lost while submitting the task; submitting again.
+        recreateZKConnection();
+        submitTask((TaskObject) ctx);
+        break;
+
+      case OK:
+        LOG.info("Task created : " + name);
+        setStatusWatch(name, (TaskObject) ctx);
+        break;
+
+      default:
+        LOG.error("Error submitting task: " + name,
+          KeeperException.create(KeeperException.Code.get(rc)));
+    }
+  };
+
+  AsyncCallback.VoidCallback taskDeleteCallback = new AsyncCallback.VoidCallback() {
+    @Override
+    public void processResult(int rc, String path, Object ctx) {
+      switch (KeeperException.Code.get(rc)) {
+        case CONNECTIONLOSS:
+          // Connection loss while deleting the task; deleting it again.
+          recreateZKConnection();
+          deleteTask(path);
+          break;
+
+        case OK:
+          LOG.info("Task deleted successfully!");
+          LOG.info("Closing ZooKeeper connection");
+          try {
+            zk.close();
+          } catch (InterruptedException e) {
+            LOG.error("Error while closing ZooKeeper connection.");
+          }
+          break;
+
+        default:
+          LOG.error("Error while deleting task: " + path,
+            KeeperException.create(KeeperException.Code.get(rc)));
+      }
+    }
+  };
+
+  private void recreateZKConnection() {
+    try {
+      zk.close();
+    } catch (InterruptedException e) {
+      LOG.error("Error closing ZK connection : ", e);
+    } finally {
+      try {
+        createNewZKConnection();
+      } catch (IOException e) {
+        LOG.error("Error creating new ZK connection : ", e);
+      }
+    }
+  }
+
+  static class TaskObject {
+    private final String command;
+    private final String taskHostname;
+
+    public TaskObject(String command, String taskHostname) {
+      this.command = command;
+      this.taskHostname = taskHostname;
+    }
+
+    public String getCommand() {
+      return this.command;
+    }
+
+    public String getTaskHostname() {
+      return taskHostname;
+    }
+  }
+
+}
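The ZNodeClusterManager added below is what turns ClusterManager operations into "exec"/"bool" tasks for the agents. A sketch of driving it directly, assuming ServiceType is the nested enum of the ClusterManager interface; the hostname and port are made up:

// Sketch: restarting a region server through the ZNodeClusterManager below.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterManager;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ZNodeClusterManager;

public class RestartDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ZNodeClusterManager mgr = new ZNodeClusterManager();
    mgr.setConf(conf); // the Configured base class carries the ZK quorum settings
    // Sends an "exec"-prefixed restart command through ZooKeeper to the agent.
    mgr.restart(ClusterManager.ServiceType.HBASE_REGIONSERVER, "rs-1.example.com", 16020);
  }
}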
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java
new file mode 100644
index 000000000000..88f14b0d0d34
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class ZNodeClusterManager extends Configured implements ClusterManager {
+  private static final Logger LOG = LoggerFactory.getLogger(ZNodeClusterManager.class.getName());
+  private static final String SIGKILL = "SIGKILL";
+  private static final String SIGSTOP = "SIGSTOP";
+  private static final String SIGCONT = "SIGCONT";
+
+  public ZNodeClusterManager() {
+  }
+
+  private String getZKQuorumServersStringFromHbaseConfig() {
+    String port =
+      Integer.toString(getConf().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181));
+    String[] serverHosts = getConf().getStrings(HConstants.ZOOKEEPER_QUORUM, "localhost");
+    for (int i = 0; i < serverHosts.length; i++) {
+      serverHosts[i] = serverHosts[i] + ":" + port;
+    }
+    return Arrays.asList(serverHosts).stream().collect(Collectors.joining(","));
+  }
+
+  private String createZNode(String hostname, String cmd) throws IOException {
+    LOG.info("ZooKeeper mode enabled, sending command to ZooKeeper. cmd: {}, hostname: {}",
+      cmd, hostname);
+    ChaosZKClient chaosZKClient = new ChaosZKClient(getZKQuorumServersStringFromHbaseConfig());
+    return chaosZKClient.submitTask(new ChaosZKClient.TaskObject(cmd, hostname));
+  }
+
+  protected HBaseClusterManager.CommandProvider getCommandProvider(ServiceType service)
+    throws IOException {
+    switch (service) {
+      case HADOOP_DATANODE:
+      case HADOOP_NAMENODE:
+        return new HBaseClusterManager.HadoopShellCommandProvider(getConf());
+      case ZOOKEEPER_SERVER:
+        return new HBaseClusterManager.ZookeeperShellCommandProvider(getConf());
+      default:
+        return new HBaseClusterManager.HBaseShellCommandProvider(getConf());
+    }
+  }
+
+  public void signal(ServiceType service, String signal, String hostname) throws IOException {
+    createZNode(hostname, CmdType.exec.toString() +
+      getCommandProvider(service).signalCommand(service, signal));
+  }
+
+  private void createOpCommand(String hostname, ServiceType service,
+    HBaseClusterManager.CommandProvider.Operation op) throws IOException {
+    createZNode(hostname, CmdType.exec.toString() +
+      getCommandProvider(service).getCommand(service, op));
+  }
+
+  @Override
+  public void start(ServiceType service, String hostname, int port) throws IOException {
+    createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.START);
+  }
+
+  @Override
+  public void stop(ServiceType service, String hostname, int port) throws IOException {
+    createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.STOP);
+  }
+
+  @Override
+  public void restart(ServiceType service, String hostname, int port) throws IOException {
+    createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.RESTART);
+  }
+
+  @Override
+  public void kill(ServiceType service, String hostname, int port) throws IOException {
+    signal(service, SIGKILL, hostname);
+  }
+
+  @Override
+  public void suspend(ServiceType service, String hostname, int port) throws IOException {
+    signal(service, SIGSTOP, hostname);
+  }
+
+  @Override
+  public void resume(ServiceType service, String hostname, int port) throws IOException {
+    signal(service, SIGCONT, hostname);
+  }
+
+  @Override
+  public boolean isRunning(ServiceType service, String hostname, int port) throws IOException {
+    return
+      Boolean.parseBoolean(createZNode(hostname, CmdType.bool.toString() +
+        getCommandProvider(service).isRunningCommand(service)));
+  }
+
+  enum CmdType {
+    exec,
+    bool
+  }
+}

From 56f1dbd1165c9bb672e0388357415386dcd8bc7e Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Mon, 21 Dec 2020 08:08:22 -0800
Subject: [PATCH 285/769] HBASE-25425 Some notes on RawCell (#2797)

Signed-off-by: Viraj Jasani
---
 .../java/org/apache/hadoop/hbase/client/Mutation.java  | 10 ----------
 .../src/main/java/org/apache/hadoop/hbase/RawCell.java |  6 +++++-
 2 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 6ade9eb8f8e5..ab6fc9475142 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -43,7 +43,6 @@
 import org.apache.hadoop.hbase.IndividualBytesFieldCell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.PrivateCellUtil;
-import org.apache.hadoop.hbase.RawCell;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.HeapSize;
@@ -1000,25 +999,16 @@ public byte getTypeByte() {
 
     @Override
     public Optional<Tag> getTag(byte type) {
-      if (cell instanceof RawCell) {
-        return ((RawCell) cell).getTag(type);
-      }
       return PrivateCellUtil.getTag(cell, type);
     }
 
     @Override
     public Iterator<Tag> getTags() {
-      if (cell instanceof RawCell) {
-        return ((RawCell) cell).getTags();
-      }
       return PrivateCellUtil.tagsIterator(cell);
     }
 
     @Override
     public byte[] cloneTags() {
-      if (cell instanceof RawCell) {
-        return ((RawCell) cell).cloneTags();
-      }
       return PrivateCellUtil.cloneTags(cell);
     }

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
index 85f8b278de47..d29e8ca8bdce 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
@@ -24,8 +24,12 @@
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * An extended version of cell that gives more power to CPs
+ * An extended version of Cell that allows CPs to manipulate Tags.
  */
+// Added by HBASE-19092 to expose Tags to CPs (history server) w/o exposing ExtendedCell.
+// Why is this in hbase-common and not in hbase-server where it is used?
+// RawCell is an odd name for a class that is only for CPs that want to manipulate Tags on
+// server-side only w/o exposing ExtendedCell -- super rare, super exotic.
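+//
+// A hypothetical usage sketch (illustrative only, not something this patch adds): a CP that
+// wants to read a cell's TTL tag could do
+//
+//   if (cell instanceof RawCell) {
+//     Optional<Tag> ttl = ((RawCell) cell).getTag(TagType.TTL_TAG_TYPE);
+//   }
+//
+// where TagType.TTL_TAG_TYPE is the existing tag-type constant in hbase-common.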
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 public interface RawCell extends Cell {
   static final int MAX_TAGS_LENGTH = (2 * Short.MAX_VALUE) + 1;

From dcb38f47dba4fbcd3739a982a8d0a5d68909e5a0 Mon Sep 17 00:00:00 2001
From: Mohammad Arshad
Date: Mon, 21 Dec 2020 21:41:22 +0530
Subject: [PATCH 286/769] HBASE-25371: When openRegion fails during initial
 verification(before… (#2785)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: stack
---
 .../java/org/apache/hadoop/hbase/regionserver/HRegion.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ed32fd5293a5..493b74b6b9ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -8135,7 +8135,9 @@ protected HRegion openHRegion(final CancelableProgressable reporter)
       // MetricsRegionWrapperImpl is already init and not close,
       // add region close when open failed
       try {
-        this.close();
+        // It is not required to write the sequence id file when the region open fails.
+        // Passing true skips the sequence id file write.
+        this.close(true);
       } catch (Throwable e) {
         LOG.warn("Open region: {} failed. Try close region but got exception ",
           this.getRegionInfo(), e);

From 0f868da05d7ffabe4512a0cae110ed097b033ebf Mon Sep 17 00:00:00 2001
From: Huang Zhuoyue
Date: Tue, 22 Dec 2020 17:28:25 +0800
Subject: [PATCH 287/769] HBASE-25443 Improve the experience of using the
 Master webpage by changing the loading process of the snapshot list to
 asynchronous
---
 .../hbase/tmpl/master/MasterStatusTmpl.jamon  | 35 -----------
 .../hbase-webapps/master/userSnapshots.jsp    | 58 +++++++++++++++++++
 .../resources/hbase-webapps/static/js/tab.js  |  6 +-
 3 files changed, 63 insertions(+), 36 deletions(-)
 create mode 100644 hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp

diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 602122db4a31..14e82e8b970c 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -270,7 +270,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
    - <& userSnapshots &>
    @@ -657,40 +656,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); -<%def userSnapshots> -<%java> - List snapshots = master.isInitialized() ? - master.getSnapshotManager().getCompletedSnapshots() : null; - -<%if (snapshots != null && snapshots.size() > 0)%> -
    Id Parent
    - - - - - - - - <%for SnapshotDescription snapshotDesc : snapshots%> - <%java> - TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable()); - - - - - - - - - - -

    <% snapshots.size() %> snapshot(s) in set. [Snapshot Storefile stats]

    -
    Snapshot NameTableCreation TimeOwnerTTL
    <% snapshotDesc.getName() %> <% snapshotTable.getNameAsString() %> - <% new Date(snapshotDesc.getCreationTime()) %><% snapshotDesc.getOwner() %> - <% snapshotDesc.getTtl() == 0 ? "FOREVER": PrettyPrinter.format(String.valueOf(snapshotDesc.getTtl()), PrettyPrinter.Unit.TIME_INTERVAL) %> -
    - - <%def deadRegionServers> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp b/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp new file mode 100644 index 000000000000..0b741e1089fd --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp @@ -0,0 +1,58 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/plain;charset=UTF-8" + import="java.util.List" + import="java.util.Date" + import="org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription" + import="org.apache.hadoop.hbase.master.HMaster" + import="org.apache.hadoop.hbase.TableName" + import="org.apache.hadoop.hbase.util.PrettyPrinter" +%> +<% + HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); + List snapshots = master.isInitialized() ? + master.getSnapshotManager().getCompletedSnapshots() : null; +%> +<%if (snapshots != null && snapshots.size() > 0) { %> + + + + + + + + + <% for (SnapshotDescription snapshotDesc : snapshots){ %> + <% TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable()); %> + + + + + + + + + <% } %> +

    <%= snapshots.size() %> snapshot(s) in set. [Snapshot Storefile stats]

    +
    Snapshot NameTableCreation TimeOwnerTTL
    <%= snapshotDesc.getName() %> <%= snapshotTable.getNameAsString() %> + <%= new Date(snapshotDesc.getCreationTime()) %><%= snapshotDesc.getOwner() %> + <%= snapshotDesc.getTtl() == 0 ? "FOREVER": PrettyPrinter.format(String.valueOf(snapshotDesc.getTtl()), PrettyPrinter.Unit.TIME_INTERVAL) %> +
    +<% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/static/js/tab.js b/hbase-server/src/main/resources/hbase-webapps/static/js/tab.js index 31438a1b36f9..808882b9f40c 100644 --- a/hbase-server/src/main/resources/hbase-webapps/static/js/tab.js +++ b/hbase-server/src/main/resources/hbase-webapps/static/js/tab.js @@ -26,6 +26,10 @@ $(document).ready( location.hash = $(e.target).attr('href').substr(1).replace(prefix, ""); $(this).tab('show'); }); + + $.ajax({url:"/userSnapshots.jsp", success:function(result){ + $("#tab_userSnapshots").html(result); + }}); if (location.hash !== '') { var tabItem = $('a[href="' + location.hash.replace("#", "#"+prefix) + '"]'); @@ -35,4 +39,4 @@ $(document).ready( } return true; } -); \ No newline at end of file +); From 140c7f6ea06007f767cf0afcffcca832d7a44653 Mon Sep 17 00:00:00 2001 From: ramkrish86 Date: Mon, 28 Dec 2020 13:02:06 +0530 Subject: [PATCH 288/769] HBASE-24850 CellComparator perf improvement (#2802) * Using ContiguousCellFormat as a marker alone * Commit the new file * Fix the comparator logic that was an oversight * Fix the sequenceId check order * Adding few more static methods that helps in scan flow like query matcher where we have more cols * Remove ContiguousCellFormat and ensure compare() can be inlined * applying negation as per review comment * Fix checkstyle comments * fix review comments * Address review comments * Fix the checkstyle issues * Fix javadoc Signed-off-by: stack Signed-off-by: AnoopSamJohn Signed-off-by: huaxiangsun --- .../apache/hadoop/hbase/BBKVComparator.java | 173 ------ .../hbase/ByteBufferKeyOnlyKeyValue.java | 12 +- .../hadoop/hbase/CellComparatorImpl.java | 525 ++++++++++++++++-- .../org/apache/hadoop/hbase/CellUtil.java | 50 +- .../org/apache/hadoop/hbase/KeyValue.java | 56 +- .../hadoop/hbase/TestByteBufferKeyValue.java | 2 +- .../regionserver/DataBlockEncodingTool.java | 3 +- 7 files changed, 588 insertions(+), 233 deletions(-) delete mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java deleted file mode 100644 index bc76a9df37e6..000000000000 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase; - -import java.util.Comparator; - -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.primitives.Longs; - -/** - * A comparator for case where {@link ByteBufferKeyValue} is prevalent type (BBKV - * is base-type in hbase2). Takes a general comparator as fallback in case types are NOT the - * expected ByteBufferKeyValue. - * - *

    This is a tricked-out Comparator at heart of hbase read and write. It is in - * the HOT path so we try all sorts of ugly stuff so we can go faster. See below - * in this javadoc comment for the list. - * - *

    Apply this comparator narrowly so it is fed exclusively ByteBufferKeyValues - * as much as is possible so JIT can settle (e.g. make one per ConcurrentSkipListMap - * in HStore). - * - *

    Exploits specially added methods in BBKV to save on deserializations of shorts, - * longs, etc: i.e. calculating the family length requires row length; pass it in - * rather than recalculate it, and so on. - * - *

    This comparator does static dispatch to private final methods so hotspot is comfortable - * deciding inline. - * - *

    Measurement has it that we almost have it so all inlines from memstore - * ConcurrentSkipListMap on down to the (unsafe) intrinisics that do byte compare - * and deserialize shorts and ints; needs a bit more work. - * - *

    Does not take a Type to compare: i.e. it is not a Comparator<Cell> or - * CellComparator<Cell> or Comparator<ByteBufferKeyValue> because that adds - * another method to the hierarchy -- from compare(Object, Object) - * to dynamic compare(Cell, Cell) to static private compare -- and inlining doesn't happen if - * hierarchy is too deep (it is the case here). - * - *

    Be careful making changes. Compare perf before and after and look at what - * hotspot ends up generating before committing change (jitwatch is helpful here). - * Changing this one class doubled write throughput (HBASE-20483). - */ -@InterfaceAudience.Private -public class BBKVComparator implements Comparator { - protected static final Logger LOG = LoggerFactory.getLogger(BBKVComparator.class); - private final Comparator fallback; - - public BBKVComparator(Comparator fallback) { - this.fallback = fallback; - } - - @Override - public int compare(Object l, Object r) { - if ((l instanceof ByteBufferKeyValue) && (r instanceof ByteBufferKeyValue)) { - return compare((ByteBufferKeyValue)l, (ByteBufferKeyValue)r, false); - } - // Skip calling compare(Object, Object) and go direct to compare(Cell, Cell) - return this.fallback.compare((Cell)l, (Cell)r); - } - - // TODO: Come back here. We get a few percentage points extra of throughput if this is a - // private method. - static int compare(ByteBufferKeyValue left, ByteBufferKeyValue right, - boolean ignoreSequenceid) { - // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not - // sharing gets us a few percent more throughput in compares. If changes here or there, make - // sure done in both places. - - // Compare Rows. Cache row length. - int leftRowLength = left.getRowLength(); - int rightRowLength = right.getRowLength(); - int diff = ByteBufferUtils.compareTo(left.getRowByteBuffer(), left.getRowPosition(), - leftRowLength, - right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); - if (diff != 0) { - return diff; - } - - // If the column is not specified, the "minimum" key type appears as latest in the sorted - // order, regardless of the timestamp. This is used for specifying the last key/value in a - // given row, because there is no "lexicographically last column" (it would be infinitely long). - // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in that - // we can't do memcmp w/ special rules like this. - // TODO: Is there a test for this behavior? - int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); - int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); - int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, - leftFamilyLength); - - // No need of left row length below here. - - byte leftType = left.getTypeByte(leftKeyLength); - if (leftFamilyLength + leftQualifierLength == 0 && - leftType == KeyValue.Type.Minimum.getCode()) { - // left is "bigger", i.e. it appears later in the sorted order - return 1; - } - - int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); - int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); - int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, - rightFamilyLength); - - // No need of right row length below here. - - byte rightType = right.getTypeByte(rightKeyLength); - if (rightFamilyLength + rightQualifierLength == 0 && - rightType == KeyValue.Type.Minimum.getCode()) { - return -1; - } - - // Compare families. 
- int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); - int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); - diff = ByteBufferUtils.compareTo(left.getFamilyByteBuffer(), leftFamilyPosition, - leftFamilyLength, - right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); - if (diff != 0) { - return diff; - } - - // Compare qualifiers - diff = ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), - left.getQualifierPosition(leftFamilyPosition, leftFamilyLength), leftQualifierLength, - right.getQualifierByteBuffer(), - right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), - rightQualifierLength); - if (diff != 0) { - return diff; - } - - // Timestamps. - // Swap order we pass into compare so we get DESCENDING order. - diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); - if (diff != 0) { - return diff; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. - diff = (0xff & rightType) - (0xff & leftType); - if (diff != 0) { - return diff; - } - - // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return ignoreSequenceid ? diff : Longs.compare(right.getSequenceId(), left.getSequenceId()); - } -} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java index cc7e8d72c3d7..d55733769ddf 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java @@ -161,7 +161,11 @@ private int getTimestampOffset() { @Override public byte getTypeByte() { - return ByteBufferUtils.toByte(this.buf, this.offset + this.length - 1); + return getTypeByte(this.length); + } + + byte getTypeByte(int keyLen) { + return ByteBufferUtils.toByte(this.buf, this.offset + keyLen - 1); } @Override @@ -236,7 +240,11 @@ public int getFamilyPosition() { // The position in BB where the family length is added. private int getFamilyLengthPosition() { - return this.offset + Bytes.SIZEOF_SHORT + getRowLength(); + return getFamilyLengthPosition(getRowLength()); + } + + int getFamilyLengthPosition(int rowLength) { + return this.offset + Bytes.SIZEOF_SHORT + rowLength; } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java index 4af035a94f16..d55f9bad46fe 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java @@ -34,8 +34,7 @@ * format should be taken into consideration, for which the instance of this comparator * should be used. In all other cases the static APIs in this comparator would be enough *

    HOT methods. We spend a good portion of CPU comparing. Anything that makes the compare - * faster will likely manifest at the macro level. See also - * {@link BBKVComparator}. Use it when mostly {@link ByteBufferKeyValue}s. + * faster will likely manifest at the macro level. *
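 * <p>As a sketch of the dispatch pattern used below (this mirrors the code added in this
 * patch; nothing beyond it is assumed): {@code compare(l, r, ignoreSequenceid)} first "peels
 * off" the concrete types, e.g. {@code if (l instanceof KeyValue && r instanceof KeyValue)
 * return compareKeyValues((KeyValue) l, (KeyValue) r);}, so the byte-level work runs in
 * static helpers that hotspot can inline without a megamorphic call site.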

    */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( @@ -57,29 +56,286 @@ public final int compare(final Cell a, final Cell b) { } @Override - public int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { - + public int compare(final Cell l, final Cell r, boolean ignoreSequenceid) { int diff = 0; // "Peel off" the most common path. - if (a instanceof ByteBufferKeyValue && b instanceof ByteBufferKeyValue) { - diff = BBKVComparator.compare((ByteBufferKeyValue)a, (ByteBufferKeyValue)b, ignoreSequenceid); + if (l instanceof KeyValue && r instanceof KeyValue) { + diff = compareKeyValues((KeyValue) l, (KeyValue) r); + if (diff != 0) { + return diff; + } + } else if (l instanceof KeyValue && r instanceof ByteBufferKeyValue) { + diff = compareKVVsBBKV((KeyValue) l, (ByteBufferKeyValue) r); + if (diff != 0) { + return diff; + } + } else if (l instanceof ByteBufferKeyValue && r instanceof KeyValue) { + diff = compareKVVsBBKV((KeyValue) r, (ByteBufferKeyValue) l); + if (diff != 0) { + // negate- Findbugs will complain? + return -diff; + } + } else if (l instanceof ByteBufferKeyValue && r instanceof ByteBufferKeyValue) { + diff = compareBBKV((ByteBufferKeyValue) l, (ByteBufferKeyValue) r); if (diff != 0) { return diff; } } else { - diff = compareRows(a, b); + int leftRowLength = l.getRowLength(); + int rightRowLength = r.getRowLength(); + diff = compareRows(l, leftRowLength, r, rightRowLength); if (diff != 0) { return diff; } - diff = compareWithoutRow(a, b); + diff = compareWithoutRow(l, r); if (diff != 0) { return diff; } } - // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return ignoreSequenceid? diff: Long.compare(b.getSequenceId(), a.getSequenceId()); + return ignoreSequenceid ? diff : Long.compare(r.getSequenceId(), l.getSequenceId()); + } + + private static int compareKeyValues(final KeyValue left, final KeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = Bytes.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowArray(), right.getRowOffset(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyOffset(rightFamilyLengthPosition); + diff = Bytes.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, + right.getFamilyArray(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = Bytes.compareTo(left.getQualifierArray(), + left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierArray(), right.getQualifierOffset(rightFamilyPosition, rightFamilyLength), + rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + // TODO : Ensure we read the bytes and do the compare instead of the value. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); + } + + private static int compareBBKV(final ByteBufferKeyValue left, final ByteBufferKeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = ByteBufferUtils.compareTo(left.getRowByteBuffer(), left.getRowPosition(), + leftRowLength, right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + diff = ByteBufferUtils.compareTo(left.getFamilyByteBuffer(), leftFamilyPosition, + leftFamilyLength, right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + left.getQualifierPosition(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierByteBuffer(), + right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); + } + + private static int compareKVVsBBKV(final KeyValue left, final ByteBufferKeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + diff = ByteBufferUtils.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, + right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = ByteBufferUtils.compareTo(left.getQualifierArray(), + left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierByteBuffer(), + right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); } /** @@ -94,6 +350,65 @@ public final int compareColumns(final Cell left, final Cell right) { return compareQualifiers(left, right); } + private int compareColumns(final Cell left, final int leftFamLen, final int leftQualLen, + final Cell right, final int rightFamLen, final int rightQualLen) { + int diff = compareFamilies(left, leftFamLen, right, rightFamLen); + if (diff != 0) { + return diff; + } + return compareQualifiers(left, leftQualLen, right, rightQualLen); + } + + private int compareFamilies(Cell left, int leftFamLen, Cell right, int rightFamLen) { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, right.getFamilyArray(), + right.getFamilyOffset(), rightFamLen); + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. 
+ return ByteBufferUtils.compareTo(left.getFamilyArray(), left.getFamilyOffset(), leftFamLen, + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); + } + return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), leftFamLen, + right.getFamilyArray(), right.getFamilyOffset(), rightFamLen); + } + + private final int compareQualifiers(Cell left, int leftQualLen, Cell right, int rightQualLen) { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, + ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, + right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. + return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + leftQualLen, ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); + } + return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), leftQualLen, + right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); + } + /** * Compare the families of left and right cell * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise @@ -125,38 +440,174 @@ public final int compareFamilies(Cell left, Cell right) { right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } + static int compareQualifiers(KeyValue left, KeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. 
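+    // Note: despite the comment above, no family bytes are actually compared in this method;
+    // the family offset/length are only needed to locate where the qualifier bytes begin.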
+ int leftFamilyOffset = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyOffset = right.getFamilyOffset(rightFamilyLengthPosition); + + // Compare qualifiers + return Bytes.compareTo(left.getQualifierArray(), leftFamilyOffset + leftFamilyLength, + leftQualifierLength, right.getQualifierArray(), rightFamilyOffset + rightFamilyLength, + rightQualifierLength); + } + + static int compareQualifiers(KeyValue left, ByteBufferKeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. + int leftFamilyOffset = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierArray(), + leftFamilyOffset + leftFamilyLength, leftQualifierLength, right.getQualifierByteBuffer(), + rightFamilyPosition + rightFamilyLength, rightQualifierLength); + } + + static int compareQualifiers(ByteBufferKeyValue left, KeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. 
+ int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyOffset = right.getFamilyOffset(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierArray(), + rightFamilyOffset + rightFamilyLength, rightQualifierLength); + } + + static int compareQualifiers(ByteBufferKeyValue left, ByteBufferKeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. + int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierByteBuffer(), + rightFamilyPosition + rightFamilyLength, rightQualifierLength); + } + /** * Compare the qualifiers part of the left and right cells. 
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise */ @Override public final int compareQualifiers(Cell left, Cell right) { - if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { - return ByteBufferUtils - .compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) left).getQualifierPosition(), - left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) right).getQualifierPosition(), - right.getQualifierLength()); - } - if (left instanceof ByteBufferExtendedCell) { - return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + if ((left instanceof ByteBufferKeyValue) && (right instanceof ByteBufferKeyValue)) { + return compareQualifiers((ByteBufferKeyValue) left, (ByteBufferKeyValue) right); + } else if ((left instanceof KeyValue) && (right instanceof KeyValue)) { + return compareQualifiers((KeyValue) left, (KeyValue) right); + } else if ((left instanceof KeyValue) && (right instanceof ByteBufferKeyValue)) { + return compareQualifiers((KeyValue) left, (ByteBufferKeyValue) right); + } else if ((left instanceof ByteBufferKeyValue) && (right instanceof KeyValue)) { + return compareQualifiers((ByteBufferKeyValue) left, (KeyValue) right); + } else { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), + ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); - } - if (right instanceof ByteBufferExtendedCell) { - // Notice how we flip the order of the compare here. We used to negate the return value but - // see what FindBugs says - // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO - // It suggest flipping the order to get same effect and 'safer'. - return ByteBufferUtils.compareTo(left.getQualifierArray(), - left.getQualifierOffset(), left.getQualifierLength(), - ((ByteBufferExtendedCell)right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell)right).getQualifierPosition(), right.getQualifierLength()); - } - return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. 
+ return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); + } + return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); + } + } /** @@ -195,8 +646,8 @@ static int compareRows(final Cell left, int leftRowLength, final Cell right, int ((ByteBufferExtendedCell)right).getRowByteBuffer(), ((ByteBufferExtendedCell)right).getRowPosition(), rightRowLength); } - return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), - right.getRowArray(), right.getRowOffset(), right.getRowLength()); + return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowArray(), right.getRowOffset(), rightRowLength); } /** @@ -249,10 +700,10 @@ public final int compareWithoutRow(final Cell left, final Cell right) { } if (lFamLength != rFamLength) { // comparing column family is enough. - return compareFamilies(left, right); + return compareFamilies(left, lFamLength, right, rFamLength); } // Compare cf:qualifier - int diff = compareColumns(left, right); + int diff = compareColumns(left, lFamLength, lQualLength, right, rFamLength, rQualLength); if (diff != 0) { return diff; } @@ -282,7 +733,7 @@ public int compareTimestamps(final long ltimestamp, final long rtimestamp) { @Override public Comparator getSimpleComparator() { - return new BBKVComparator(this); + return this; } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index a51fa3de96ef..c3b65e32c11c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -433,6 +433,11 @@ public static boolean matchingRow(final Cell left, final byte[] buf, final int o public static boolean matchingFamily(final Cell left, final Cell right) { byte lfamlength = left.getFamilyLength(); byte rfamlength = right.getFamilyLength(); + return matchingFamily(left, lfamlength, right, rfamlength); + } + + public static boolean matchingFamily(final Cell left, final byte lfamlength, final Cell right, + final byte rfamlength) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), ((ByteBufferExtendedCell) left).getFamilyPosition(), lfamlength, @@ -463,6 +468,11 @@ public static boolean matchingFamily(final Cell left, final byte[] buf) { public static boolean matchingQualifier(final Cell left, final Cell right) { int lqlength = left.getQualifierLength(); int rqlength = right.getQualifierLength(); + return matchingQualifier(left, lqlength, right, rqlength); + } + + private static boolean matchingQualifier(final Cell left, final int lqlength, final Cell right, + final int rqlength) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), ((ByteBufferExtendedCell) left).getQualifierPosition(), lqlength, @@ -516,6 +526,14 @@ public static boolean matchingColumn(final Cell left, final Cell right) { return matchingQualifier(left, right); } + private static boolean matchingColumn(final Cell 
left, final byte lFamLen, final int lQualLength, + final Cell right, final byte rFamLen, final int rQualLength) { + if (!matchingFamily(left, lFamLen, right, rFamLen)) { + return false; + } + return matchingQualifier(left, lQualLength, right, rQualLength); + } + public static boolean matchingValue(final Cell left, final Cell right) { return matchingValue(left, right, left.getValueLength(), right.getValueLength()); } @@ -685,6 +703,11 @@ public static boolean matchingTimestamp(Cell a, Cell b) { public static boolean matchingRows(final Cell left, final Cell right) { short lrowlength = left.getRowLength(); short rrowlength = right.getRowLength(); + return matchingRows(left, lrowlength, right, rrowlength); + } + + public static boolean matchingRows(final Cell left, final short lrowlength, final Cell right, + final short rrowlength) { if (lrowlength != rrowlength) return false; if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getRowByteBuffer(), @@ -713,16 +736,29 @@ public static boolean matchingRows(final Cell left, final Cell right) { * @return True if same row and column. */ public static boolean matchingRowColumn(final Cell left, final Cell right) { - if ((left.getRowLength() + left.getFamilyLength() - + left.getQualifierLength()) != (right.getRowLength() + right.getFamilyLength() - + right.getQualifierLength())) { + short lrowlength = left.getRowLength(); + short rrowlength = right.getRowLength(); + // match length + if (lrowlength != rrowlength) { + return false; + } + + byte lfamlength = left.getFamilyLength(); + byte rfamlength = right.getFamilyLength(); + if (lfamlength != rfamlength) { return false; } - if (!matchingRows(left, right)) { + int lqlength = left.getQualifierLength(); + int rqlength = right.getQualifierLength(); + if (lqlength != rqlength) { + return false; + } + + if (!matchingRows(left, lrowlength, right, rrowlength)) { return false; } - return matchingColumn(left, right); + return matchingColumn(left, lfamlength, lqlength, right, rfamlength, rqlength); } public static boolean matchingRowColumnBytes(final Cell left, final Cell right) { @@ -732,9 +768,9 @@ public static boolean matchingRowColumnBytes(final Cell left, final Cell right) int rfamlength = right.getFamilyLength(); int lqlength = left.getQualifierLength(); int rqlength = right.getQualifierLength(); + // match length - if ((lrowlength + lfamlength + lqlength) != - (rrowlength + rfamlength + rqlength)) { + if ((lrowlength != rrowlength) || (lfamlength != rfamlength) || (lqlength != rqlength)) { return false; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 856480f15066..79356edfea21 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -32,6 +32,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; + import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -1348,14 +1349,14 @@ public byte[] getFamilyArray() { */ @Override public int getFamilyOffset() { - return getFamilyOffset(getRowLength()); + return getFamilyOffset(getFamilyLengthPosition(getRowLength())); } /** * @return Family offset */ - private int getFamilyOffset(int rlength) { - return this.offset + ROW_KEY_OFFSET + rlength + Bytes.SIZEOF_BYTE; 
+ int getFamilyOffset(int familyLenPosition) { + return familyLenPosition + Bytes.SIZEOF_BYTE; } /** @@ -1363,14 +1364,18 @@ private int getFamilyOffset(int rlength) { */ @Override public byte getFamilyLength() { - return getFamilyLength(getFamilyOffset()); + return getFamilyLength(getFamilyLengthPosition(getRowLength())); } /** * @return Family length */ - public byte getFamilyLength(int foffset) { - return this.bytes[foffset-1]; + public byte getFamilyLength(int famLenPos) { + return this.bytes[famLenPos]; + } + + int getFamilyLengthPosition(int rowLength) { + return this.offset + KeyValue.ROW_KEY_OFFSET + rowLength; } /** @@ -1393,7 +1398,14 @@ public int getQualifierOffset() { * @return Qualifier offset */ private int getQualifierOffset(int foffset) { - return foffset + getFamilyLength(foffset); + return getQualifierOffset(foffset, getFamilyLength()); + } + + /** + * @return Qualifier offset + */ + int getQualifierOffset(int foffset, int flength) { + return foffset + flength; } /** @@ -1408,7 +1420,14 @@ public int getQualifierLength() { * @return Qualifier length */ private int getQualifierLength(int rlength, int flength) { - return getKeyLength() - (int) getKeyDataStructureSize(rlength, flength, 0); + return getQualifierLength(getKeyLength(), rlength, flength); + } + + /** + * @return Qualifier length + */ + int getQualifierLength(int keyLength, int rlength, int flength) { + return keyLength - (int) getKeyDataStructureSize(rlength, flength, 0); } /** @@ -1501,7 +1520,11 @@ long getTimestamp(final int keylength) { */ @Override public byte getTypeByte() { - return this.bytes[this.offset + getKeyLength() - 1 + ROW_OFFSET]; + return getTypeByte(getKeyLength()); + } + + byte getTypeByte(int keyLength) { + return this.bytes[this.offset + keyLength - 1 + ROW_OFFSET]; } /** @@ -1875,8 +1898,8 @@ public int compareRows(final Cell left, final Cell right) { * @param rlength * @return 0 if equal, <0 if left smaller, >0 if right smaller */ - public int compareRows(byte [] left, int loffset, int llength, - byte [] right, int roffset, int rlength) { + public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, + int rlength) { return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); } @@ -2449,6 +2472,10 @@ public byte getFamilyLength() { return this.bytes[getFamilyOffset() - 1]; } + int getFamilyLengthPosition(int rowLength) { + return this.offset + Bytes.SIZEOF_SHORT + rowLength; + } + @Override public int getFamilyOffset() { return this.offset + Bytes.SIZEOF_SHORT + getRowLength() + Bytes.SIZEOF_BYTE; @@ -2481,9 +2508,14 @@ public short getRowLength() { @Override public byte getTypeByte() { - return this.bytes[this.offset + getKeyLength() - 1]; + return getTypeByte(getKeyLength()); } + byte getTypeByte(int keyLength) { + return this.bytes[this.offset + keyLength - 1]; + } + + private int getQualifierLength(int rlength, int flength) { return getKeyLength() - (int) getKeyDataStructureSize(rlength, flength, 0); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestByteBufferKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestByteBufferKeyValue.java index 6443d84ebd28..d6c8a75f78d5 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestByteBufferKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestByteBufferKeyValue.java @@ -69,7 +69,7 @@ public void testCompare() { assertTrue(CellComparatorImpl.COMPARATOR.compare(cell1, cell3) < 0); Cell cell4 = getOffheapCell(row1, Bytes.toBytes("f"), 
qual2); assertTrue(CellComparatorImpl.COMPARATOR.compare(cell1, cell4) > 0); - BBKVComparator comparator = new BBKVComparator(null); + CellComparator comparator = CellComparator.getInstance(); assertTrue(comparator.compare(cell1, cell2) < 0); assertTrue(comparator.compare(cell1, cell3) < 0); assertTrue(comparator.compare(cell1, cell4) > 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 0fb183e4909b..a8efa16047da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -221,7 +221,8 @@ public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) int kLen = currentKV.getKeyLength(); int vLen = currentKV.getValueLength(); - int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset()); + int cfOffset = currentKV.getFamilyOffset(); + int cfLen = currentKV.getFamilyLength(); int restLen = currentKV.getLength() - kLen - vLen; totalKeyLength += kLen; From d963342f8a54877cb212618ba35d75a7d6c2005b Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Tue, 29 Dec 2020 02:57:30 +0800 Subject: [PATCH 289/769] HBASE-25432:add security checks for setTableStateInMeta and fixMeta (#2809) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../hbase/master/MasterRpcServices.java | 2 ++ .../hbase/security/access/SecureTestUtil.java | 4 +++ .../security/access/TestAccessController.java | 30 +++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index a68aa6650959..a11713276d77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -2527,6 +2527,7 @@ public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest re @Override public GetTableStateResponse setTableStateInMeta(RpcController controller, SetTableStateInMetaRequest request) throws ServiceException { + rpcPreCheck("setTableStateInMeta"); TableName tn = ProtobufUtil.toTableName(request.getTableName()); try { TableState prevState = this.master.getTableStateManager().getTableState(tn); @@ -2732,6 +2733,7 @@ public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProc @Override public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request) throws ServiceException { + rpcPreCheck("fixMeta"); try { MetaFixer mf = new MetaFixer(this.master); mf.fix(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java index 840c30d80f54..6e0ef5411b17 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException; import org.apache.hadoop.hbase.regionserver.HRegion; import 
org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; @@ -249,6 +250,9 @@ public static void verifyDenied(User user, AccessTestAction... actions) throws E // is buried in the stack trace Throwable ex = e; do { + if (ex instanceof RemoteWithExtrasException) { + ex = ((RemoteWithExtrasException) ex).unwrapRemoteException(); + } if (ex instanceof AccessDeniedException) { isAccessDeniedException = true; break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index d53a84c260df..7ab808cf1823 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Hbck; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Put; @@ -72,6 +73,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -379,6 +381,34 @@ public void testUnauthorizedStopMaster() throws Exception { USER_GROUP_WRITE, USER_GROUP_CREATE); } + @Test + public void testUnauthorizedSetTableStateInMeta() throws Exception { + AccessTestAction action = () -> { + try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()){ + hbck.setTableStateInMeta(new TableState(TEST_TABLE, TableState.State.DISABLED)); + } + return null; + }; + + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); + } + + @Test + public void testUnauthorizedFixMeta() throws Exception { + AccessTestAction action = () -> { + try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()){ + hbck.fixMeta(); + } + return null; + }; + + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); + } + @Test public void testSecurityCapabilities() throws Exception { List capabilities = TEST_UTIL.getConnection().getAdmin() From c96fbf04077fa37555033f88939fdd69ac810b35 Mon Sep 17 00:00:00 2001 From: Pankaj Date: Tue, 29 Dec 2020 22:25:36 +0530 Subject: [PATCH 290/769] HBASE-25379 Make retry pause time configurable for regionserver short operation RPC (reportRegionStateTransition/reportProcedureDone) (#2757) * HBASE-25379 Make retry pause time configurable for regionserver short operation RPC (reportRegionStateTransition/reportProcedureDone) * HBASE-25379 RemoteProcedureResultReporter also should retry after the configured pause time * Addressed the review comments Signed-off-by: Yulin Niu --- .../org/apache/hadoop/hbase/HConstants.java | 11 +++++++++ .../hbase/regionserver/HRegionServer.java | 23 +++++++++++++++---- 
.../RemoteProcedureResultReporter.java | 7 ++---- 3 files changed, 31 insertions(+), 10 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index e1d3de9d513b..05782fc5518c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -965,6 +965,17 @@ public enum OperationStatusCode { */ public static final int DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT = 10000; + /** + * Retry pause time for short operation RPC + */ + public static final String HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME = + "hbase.rpc.shortoperation.retry.pause.time"; + + /** + * Default value of {@link #HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME} + */ + public static final long DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME = 1000; + /** * Value indicating the server name was saved with no sequence number. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 87d073c81c98..bcb143652203 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -435,6 +435,9 @@ public class HRegionServer extends Thread implements private final int shortOperationTimeout; + // Time to pause if master says 'please hold' + private final long retryPauseTime; + private final RegionServerAccounting regionServerAccounting; private SlowLogTableOpsChore slowLogTableOpsChore = null; @@ -615,6 +618,9 @@ public HRegionServer(final Configuration conf) throws IOException { this.shortOperationTimeout = conf.getInt(HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT); + this.retryPauseTime = conf.getLong(HConstants.HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME, + HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME); + this.abortRequested = new AtomicBoolean(false); this.stopped = false; @@ -2436,10 +2442,8 @@ public boolean reportRegionStateTransition(final RegionStateTransitionContext co final ReportRegionStateTransitionRequest request = createReportRegionStateTransitionRequest(context); - // Time to pause if master says 'please hold'. Make configurable if needed. - final long initPauseTime = 1000; int tries = 0; - long pauseTime; + long pauseTime = this.retryPauseTime; // Keep looping till we get an error. We want to send reports even though server is going down. // Only go down if clusterConnection is null. It is set to null almost as last thing as the // HRegionServer does down. @@ -2470,9 +2474,9 @@ public boolean reportRegionStateTransition(final RegionStateTransitionContext co || ioe instanceof CallQueueTooBigException; if (pause) { // Do backoff else we flood the Master with requests. - pauseTime = ConnectionUtils.getPauseTime(initPauseTime, tries); + pauseTime = ConnectionUtils.getPauseTime(this.retryPauseTime, tries); } else { - pauseTime = initPauseTime; // Reset. + pauseTime = this.retryPauseTime; // Reset. 
} LOG.info("Failed report transition " + TextFormat.shortDebugString(request) + "; retry (#" + tries + ")" + @@ -3938,4 +3942,13 @@ public AsyncClusterConnection getAsyncClusterConnection() { public CompactedHFilesDischarger getCompactedHFilesDischarger() { return compactedFileDischarger; } + + /** + * Return pause time configured in {@link HConstants#HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME}} + * @return pause time + */ + @InterfaceAudience.Private + public long getRetryPauseTime() { + return this.retryPauseTime; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java index 981f090534a3..63e050a710ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java @@ -41,9 +41,6 @@ class RemoteProcedureResultReporter extends Thread { private static final Logger LOG = LoggerFactory.getLogger(RemoteProcedureResultReporter.class); - // Time to pause if master says 'please hold'. Make configurable if needed. - private static final int INIT_PAUSE_TIME_MS = 1000; - private static final int MAX_BATCH = 100; private final HRegionServer server; @@ -98,9 +95,9 @@ public void run() { long pauseTime; if (pause) { // Do backoff else we flood the Master with requests. - pauseTime = ConnectionUtils.getPauseTime(INIT_PAUSE_TIME_MS, tries); + pauseTime = ConnectionUtils.getPauseTime(server.getRetryPauseTime(), tries); } else { - pauseTime = INIT_PAUSE_TIME_MS; // Reset. + pauseTime = server.getRetryPauseTime(); // Reset. } LOG.info("Failed procedure report " + TextFormat.shortDebugString(request) + "; retry (#" + tries + ")" + (pause ? " after " + pauseTime + "ms delay (Master is coming online...)." 
From 55a4eca9e6ef29f822f51c772ad4460b6102a156 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 30 Dec 2020 09:47:56 +0800 Subject: [PATCH 291/769] HBASE-25452 Use MatcherAssert.assertThat instead of org.junit.Assert.assertThat (#2826) Signed-off-by: Guanghao Zhang --- .../org/apache/hadoop/hbase/ipc/TestFailedServersLog.java | 2 +- .../hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java | 2 +- .../test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java | 2 +- .../java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java | 2 +- .../test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java | 3 +-- .../org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java | 2 +- .../org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java | 3 +-- .../apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java | 2 +- .../org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java | 2 +- .../apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java | 2 +- .../hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java | 2 +- .../org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java | 2 +- .../org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java | 3 +-- .../hbase/hbtop/screen/field/TestFieldScreenPresenter.java | 2 +- .../hbase/hbtop/screen/help/TestHelpScreenPresenter.java | 2 +- .../hbase/hbtop/screen/mode/TestModeScreenPresenter.java | 2 +- .../screen/top/TestFilterDisplayModeScreenPresenter.java | 2 +- .../hbase/hbtop/screen/top/TestInputModeScreenPresenter.java | 2 +- .../hbtop/screen/top/TestMessageModeScreenPresenter.java | 2 +- .../org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java | 2 +- .../hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java | 2 +- .../apache/hadoop/hbase/http/TestSecurityHeadersFilter.java | 3 ++- .../hadoop/hbase/replication/TestReplicationStateBasic.java | 3 +-- .../hbase/replication/TestZKReplicationQueueStorage.java | 3 +-- .../apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java | 3 ++- .../hbase/client/TestAsyncAdminWithRegionReplicas.java | 2 +- .../apache/hadoop/hbase/client/TestAsyncBufferMutator.java | 2 +- .../hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java | 2 +- .../apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java | 5 +++-- .../apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java | 2 +- .../apache/hadoop/hbase/client/TestAsyncRegionLocator.java | 2 +- .../hadoop/hbase/client/TestAsyncReplicationAdminApi.java | 2 +- .../client/TestAsyncSingleRequestRpcRetryingCaller.java | 2 +- .../java/org/apache/hadoop/hbase/client/TestAsyncTable.java | 2 +- .../org/apache/hadoop/hbase/client/TestAsyncTableBatch.java | 2 +- .../hadoop/hbase/client/TestAsyncTableScanException.java | 2 +- .../org/apache/hadoop/hbase/client/TestCheckAndMutate.java | 2 +- .../org/apache/hadoop/hbase/client/TestFromClientSide4.java | 2 +- .../hadoop/hbase/client/TestFromClientSideScanExcpetion.java | 2 +- .../hadoop/hbase/client/TestScannersFromClientSide.java | 2 +- .../apache/hadoop/hbase/client/TestZKConnectionRegistry.java | 2 +- .../hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java | 2 +- .../apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java | 3 +-- .../hbase/regionserver/TestCompactionLifeCycleTracker.java | 2 +- .../hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java | 2 +- .../apache/hadoop/hbase/regionserver/TestSplitLogWorker.java | 2 +- .../hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java | 2 +- .../hadoop/hbase/replication/SyncReplicationTestBase.java | 2 +- .../hadoop/hbase/replication/TestSerialSyncReplication.java | 2 +- 
.../hadoop/hbase/replication/TestSyncReplicationActive.java | 2 +- .../TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java | 2 +- .../replication/TestSyncReplicationRemoveRemoteWAL.java | 2 +- .../hadoop/hbase/replication/TestSyncReplicationStandBy.java | 2 +- .../hbase/security/token/TestGenerateDelegationToken.java | 2 +- .../hadoop/hbase/wal/TestSyncReplicationWALProvider.java | 2 +- .../apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java | 3 ++- 56 files changed, 61 insertions(+), 63 deletions(-) diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java index 4036a51f01c9..fa44022f8d09 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.ipc; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java index ca2829a8065a..ba1e27258d2d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.ipc; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java index da0d917a826e..339cc40847d3 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hbase.hbtop.Record.entry; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.field.Field; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java index 9dec51e0ce8a..2807fd8ef61e 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java @@ -21,7 +21,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import java.util.ArrayList; import java.util.Arrays; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java index 905e4c8fd7a2..c633e37825ea 
100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.text.ParseException; @@ -27,7 +27,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetricsBuilder; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java index beb0ee8075d4..dcbdb6b9b8ab 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.field; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java index 106cfe4af47b..4f0864838532 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.TestUtils; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java index 04fd03d1663d..6c498e94eb1d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java index ed397f6adc66..b705531475f3 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java @@ -19,7 +19,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git 
a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java index ec29fd38f0a1..cbfc7283fc64 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java index 722aa2db03ad..a73d54ea6bb9 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java index 6889639f4584..f718304671c4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java index 92ca7767936e..f094c85f5481 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.TestUtils; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java index 2e2931fd1c17..cbf740430b0a 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.field; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static 
org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java index 0f7b4e3d063e..245bf615e731 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.help; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java index e6c75b5737dc..1b7e12a6240f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.never; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java index 99c29c92d131..414b5b0702c5 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java index a5357cc303ed..b5e9bb9f3ba6 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.inOrder; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java 
b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java index d4507597579f..0acd79c56d2d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java index 7cba9f6aef36..e0c09dfe1673 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java index 85b901048954..44a8878407a0 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.when; diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java index 41a1235baaf4..6b9d2c341ed7 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java @@ -22,7 +22,8 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; + import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index 348271905fcd..4bb1021b7a42 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -18,16 +18,15 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java index 74a24ac1eb62..4f1fd3908687 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -28,7 +28,6 @@ import java.util.List; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseZKTestingUtility; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java index bf0c69502d52..02611dfaf905 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java @@ -18,9 +18,10 @@ package org.apache.hadoop.hbase.rest; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsEqual.equalTo; -import static org.junit.Assert.assertThat; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.rest.client.Client; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index 3596f1c0025d..c447510cbe4a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.List; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java index 5e7f6cc5a0f7..874a01c8c711 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.client; import static 
org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index b147d9120f64..6404a89671b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -22,10 +22,10 @@ import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; import static org.apache.hadoop.hbase.client.RegionReplicaTestHelper.testLocator; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.Arrays; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index f8f4e076c804..c61a289df23f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -18,12 +18,13 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -42,9 +43,9 @@ import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.RegionStates; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java index 56c1047095bf..c9d47dc65323 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java @@ -19,10 +19,10 @@ import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java index 753e4f38d7ca..03eac06a5710 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java @@ -21,8 +21,8 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT; import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.REGION_COPROCESSOR_CONF_KEY; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java index 3defa80421e5..74b5c2fbd3c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java @@ -20,11 +20,11 @@ import static org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java index 4205012db112..bf8ce01752e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java index 9e6748e34372..c863ec12a96d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java @@ -19,11 +19,11 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static 
org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java index 3fb1a14e2477..4fb050ea287c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java @@ -19,11 +19,11 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java index 96ec86b0bfd3..3dbb1d01e820 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java index c40f2c77f4ad..262b6080538b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java index 4c5985a25b3c..5c6a98d4b913 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; 
import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java index f7f74507f436..a4f79e79667d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index 00b8a64b1153..43d14185864f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -21,12 +21,12 @@ import static org.apache.hadoop.hbase.client.TestFromClientSide3.generateHugeValue; import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 427222f8e40c..ac0e19355894 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; import java.io.IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java index d6a007797ea9..b631cf96eacb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.FileNotFoundException; diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java index e685556fd639..94186f227521 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java @@ -16,9 +16,9 @@ package org.apache.hadoop.hbase.quotas; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -27,7 +27,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java index 6cd91a711408..b124e288a481 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.regionserver; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index 9acc928756ff..631bc4514489 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.io.InterruptedIOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 2c52bc0c2105..ce5466c57c9a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index 8a82848f3658..9c2340cb8313 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.regionserver.wal; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java index 23753e211054..a8f3442785fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java index 6d9f1322b340..869d9890d11a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.endsWith; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java index 42adab60b5cb..e87655309e74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java index 9a6d242125b7..47f2b2cceb70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java index 9f8982604537..04b5d65318a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.endsWith; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import org.apache.hadoop.fs.FileStatus; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java index 3bfd9a8a4810..0c1c350b2aae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java index c05bcd49f101..dae3abc913ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java @@ -19,8 +19,8 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.Arrays; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java index 8189cef081d4..c40ebf82ea0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java @@ -19,8 +19,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.Optional; diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java index ca661458fe92..a6d9c0f47323 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.zookeeper; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; @@ -25,7 +26,6 @@ import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -36,6 +36,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; + import java.io.IOException; import java.util.Collections; import java.util.List; From 29a9a16de4199733d331746a7d1b4e6d670e55d9 Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Thu, 31 Dec 2020 02:52:26 +0800 Subject: [PATCH 292/769] HBASE-25441 : add security check for some APIs in RSRpcServices (#2810) Signed-off-by: stack Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/master/HMaster.java | 13 +++++++++++++ .../hadoop/hbase/regionserver/RSRpcServices.java | 9 +++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index a61254f56101..a1e68bf3c9e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -2784,6 +2784,19 @@ public boolean isInitialized() { return initialized.isReady(); } + /** + * Report whether this master is started + * + * This method is used for testing. + * + * @return true if master is ready to go, false if not. + */ + + @Override + public boolean isOnline() { + return serviceStarted; + } + /** * Report whether this master is in maintenance mode. 
* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index f84a6ebbf4aa..78926d6c39d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2345,6 +2345,7 @@ public RollWALWriterResponse rollWALWriter(final RpcController controller, @QosPriority(priority=HConstants.ADMIN_QOS) public StopServerResponse stopServer(final RpcController controller, final StopServerRequest request) throws ServiceException { + rpcPreCheck("stopServer"); requestCount.increment(); String reason = request.getReason(); regionServer.stop(reason); @@ -2354,6 +2355,7 @@ public StopServerResponse stopServer(final RpcController controller, @Override public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, UpdateFavoredNodesRequest request) throws ServiceException { + rpcPreCheck("updateFavoredNodes"); List openInfoList = request.getUpdateInfoList(); UpdateFavoredNodesResponse.Builder respBuilder = UpdateFavoredNodesResponse.newBuilder(); for (UpdateFavoredNodesRequest.RegionUpdateInfo regionUpdateInfo : openInfoList) { @@ -3774,6 +3776,7 @@ public UpdateConfigurationResponse updateConfiguration( RpcController controller, UpdateConfigurationRequest request) throws ServiceException { try { + requirePermission("updateConfiguration", Permission.Action.ADMIN); this.regionServer.updateConfiguration(); } catch (Exception e) { throw new ServiceException(e); @@ -3806,7 +3809,8 @@ public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots( @Override public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, - ClearRegionBlockCacheRequest request) { + ClearRegionBlockCacheRequest request) throws ServiceException { + rpcPreCheck("clearRegionBlockCache"); ClearRegionBlockCacheResponse.Builder builder = ClearRegionBlockCacheResponse.newBuilder(); CacheEvictionStatsBuilder stats = CacheEvictionStats.builder(); @@ -3933,7 +3937,8 @@ private List getSlowLogPayloads(SlowLogResponseRequest request, @Override @QosPriority(priority = HConstants.ADMIN_QOS) public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controller, - final ClearSlowLogResponseRequest request) { + final ClearSlowLogResponseRequest request) throws ServiceException { + rpcPreCheck("clearSlowLogsResponses"); final NamedQueueRecorder namedQueueRecorder = this.regionServer.getNamedQueueRecorder(); boolean slowLogsCleaned = Optional.ofNullable(namedQueueRecorder) From fbf00f9c287acee55b02993f112b514247cddd86 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 31 Dec 2020 16:57:27 +0800 Subject: [PATCH 293/769] HBASE-25451 Upgrade commons-io to 2.8.0 (#2825) Signed-off-by: Guanghao Zhang Signed-off-by: stack --- .../hbase/client/AsyncConnectionImpl.java | 7 +++--- .../client/example/AsyncClientExample.java | 2 +- .../hbase/io/FSDataInputStreamWrapper.java | 8 +++---- .../apache/hadoop/hbase/io/hfile/HFile.java | 3 ++- .../hadoop/hbase/io/hfile/HFileInfo.java | 17 +++++++++---- .../apache/hadoop/hbase/master/HMaster.java | 4 ++-- .../apache/hadoop/hbase/util/HBaseFsck.java | 13 +++++----- .../apache/hadoop/hbase/util/RegionMover.java | 4 ++-- ...tractTestAsyncTableRegionReplicasRead.java | 5 ++-- .../hbase/client/TestAsyncAdminBase.java | 5 ++-- .../hbase/client/TestAsyncAdminBuilder.java | 5 ++-- .../client/TestAsyncClusterAdminApi2.java | 5 ++-- 
.../client/TestAsyncMetaRegionLocator.java | 5 ++-- .../client/TestAsyncNonMetaRegionLocator.java | 5 ++-- ...ncNonMetaRegionLocatorConcurrenyLimit.java | 5 ++-- .../hbase/client/TestAsyncRegionLocator.java | 5 ++-- ...stAsyncSingleRequestRpcRetryingCaller.java | 5 ++-- .../hadoop/hbase/client/TestAsyncTable.java | 5 ++-- ...AsyncTableLocateRegionForDeletedTable.java | 5 ++-- .../client/TestAsyncTableNoncedRetry.java | 5 ++-- .../client/TestAsyncTableScanMetrics.java | 5 ++-- ...talogReplicaLoadBalanceSimpleSelector.java | 6 +++-- .../client/TestMetaRegionLocationCache.java | 5 ++-- .../client/TestZKConnectionRegistry.java | 5 ++-- ...TestMasterOperationsForRegionReplicas.java | 4 ++-- .../TestEndToEndSplitTransaction.java | 24 ++++++------------- .../TestSplitTransactionOnCluster.java | 4 ++-- .../replication/TestReplicationBase.java | 12 ++++++---- .../TestReplicationSyncUpToolBase.java | 13 ++++++---- .../TestSerialReplicationEndpoint.java | 5 ++-- pom.xml | 2 +- 31 files changed, 113 insertions(+), 90 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 83beaf1f40ba..1dbb7e6d211a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -212,9 +212,10 @@ public void close() { if(LOG.isDebugEnabled()){ logCallStack(Thread.currentThread().getStackTrace()); } - IOUtils.closeQuietly(clusterStatusListener); - IOUtils.closeQuietly(rpcClient); - IOUtils.closeQuietly(registry); + IOUtils.closeQuietly(clusterStatusListener, + e -> LOG.warn("failed to close clusterStatusListener", e)); + IOUtils.closeQuietly(rpcClient, e -> LOG.warn("failed to close rpcClient", e)); + IOUtils.closeQuietly(registry, e -> LOG.warn("failed to close registry", e)); if (choreService != null) { choreService.shutdown(); } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java index b773ee89ff57..fdbdbc6244f8 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java @@ -111,7 +111,7 @@ private CompletableFuture closeConn() { CompletableFuture closeFuture = new CompletableFuture<>(); addListener(f, (conn, error) -> { if (error == null) { - IOUtils.closeQuietly(conn); + IOUtils.closeQuietly(conn, e -> LOG.warn("failed to close conn", e)); } closeFuture.complete(null); }); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java index d83a9d9da90a..5bbc525b8459 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java @@ -21,8 +21,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; @@ -33,6 +31,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
org.apache.hbase.thirdparty.com.google.common.io.Closeables; + /** * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, * as well as closing streams. Initialization is not thread-safe, but normal operation is; @@ -289,11 +289,11 @@ public void close() { } updateInputStreamStatistics(this.streamNoFsChecksum); // we do not care about the close exception as it is for reading, no data loss issue. - IOUtils.closeQuietly(streamNoFsChecksum); + Closeables.closeQuietly(streamNoFsChecksum); updateInputStreamStatistics(stream); - IOUtils.closeQuietly(stream); + Closeables.closeQuietly(stream); } public HFileSystem getHfs() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 52b6359d92cd..ed0e84deace6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -505,7 +505,8 @@ public static Reader createReader(ReaderContext context, HFileInfo fileInfo, throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion()); } } catch (Throwable t) { - IOUtils.closeQuietly(context.getInputStreamWrapper()); + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); throw new CorruptHFileException("Problem reading HFile Trailer from file " + context.getFilePath(), t); } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 5d65ff3b3a39..072e5b10628a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -44,7 +44,11 @@ import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair; @@ -62,6 +66,9 @@ */ @InterfaceAudience.Private public class HFileInfo implements SortedMap { + + private static final Logger LOG = LoggerFactory.getLogger(HFileInfo.class); + static final String RESERVED_PREFIX = "hfile."; static final byte[] RESERVED_PREFIX_BYTES = Bytes.toBytes(RESERVED_PREFIX); static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); @@ -344,7 +351,8 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr this.hfileContext = createHFileContext(path, trailer, conf); context.getInputStreamWrapper().unbuffer(); } catch (Throwable t) { - IOUtils.closeQuietly(context.getInputStreamWrapper()); + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); throw new CorruptHFileException("Problem reading HFile Trailer from file " + context.getFilePath(), t); } @@ -382,9 +390,10 @@ public void initMetaAndIndex(HFile.Reader reader) throws IOException { // close the block reader context.getInputStreamWrapper().unbuffer(); } catch (Throwable t) { - IOUtils.closeQuietly(context.getInputStreamWrapper()); - throw new 
CorruptHFileException("Problem reading data index and meta index from file " - + context.getFilePath(), t); + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); + throw new CorruptHFileException( + "Problem reading data index and meta index from file " + context.getFilePath(), t); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index a1e68bf3c9e4..cbe001e91588 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -51,7 +51,6 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import javax.servlet.http.HttpServlet; -import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -221,6 +220,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server; @@ -832,7 +832,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc HBaseFsck.createLockRetryCounterFactory(this.conf).create()); } finally { if (result != null) { - IOUtils.closeQuietly(result.getSecond()); + Closeables.close(result.getSecond(), true); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 4ea4c9e14dfb..28f0d5eb887b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -138,6 +138,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -511,7 +512,7 @@ private void unlockHbck() { RetryCounter retryCounter = lockFileRetryCounterFactory.create(); do { try { - IOUtils.closeQuietly(hbckOutFd); + Closeables.close(hbckOutFd, true); CommonFSUtils.delete(CommonFSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true); LOG.info("Finishing hbck"); return; @@ -564,7 +565,7 @@ public void connect() throws IOException { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { - IOUtils.closeQuietly(HBaseFsck.this); + IOUtils.closeQuietly(HBaseFsck.this, e -> LOG.warn("", e)); cleanupHbckZnode(); unlockHbck(); } @@ -863,9 +864,9 @@ public void close() throws IOException { zkw.close(); zkw = null; } - IOUtils.closeQuietly(admin); - IOUtils.closeQuietly(meta); - IOUtils.closeQuietly(connection); + IOUtils.closeQuietly(admin, e -> LOG.warn("", e)); + IOUtils.closeQuietly(meta, e -> LOG.warn("", e)); + 
IOUtils.closeQuietly(connection, e -> LOG.warn("", e)); } } @@ -3845,7 +3846,7 @@ public HBaseFsck exec(ExecutorService exec, String[] args) setRetCode(code); } } finally { - IOUtils.closeQuietly(this); + IOUtils.closeQuietly(this, e -> LOG.warn("", e)); } return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 2f7d351ff996..08042efda68f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -121,8 +121,8 @@ private RegionMover() { @Override public void close() { - IOUtils.closeQuietly(this.admin); - IOUtils.closeQuietly(this.conn); + IOUtils.closeQuietly(this.admin, e -> LOG.warn("failed to close admin", e)); + IOUtils.closeQuietly(this.conn, e -> LOG.warn("failed to close conn", e)); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java index 65c537aa31bc..0a72d836bbc0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java @@ -28,7 +28,6 @@ import java.util.concurrent.ForkJoinPool; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -46,6 +45,8 @@ import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + public abstract class AbstractTestAsyncTableRegionReplicasRead { protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -152,7 +153,7 @@ protected static void waitUntilAllReplicasHaveRow(byte[] row) throws IOException @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java index 70cffd8cfe10..e895f164cb95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java @@ -26,7 +26,6 @@ import java.util.concurrent.ForkJoinPool; import java.util.function.Supplier; import java.util.regex.Pattern; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.StartMiniClusterOption; @@ -43,6 +42,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + /** * Class to test AsyncAdmin. 
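*
* A recurring pattern in this commons-io 2.8.0 upgrade: every bare
* IOUtils.closeQuietly(closeable) call is replaced by one of two idioms.
* A minimal sketch (conn and LOG are placeholder names, not members of
* this class):
*
*   // main code: keep the quiet close, but surface the failure in the log
*   IOUtils.closeQuietly(conn, e -> LOG.warn("failed to close conn", e));
*
*   // test code: Guava's Closeables; 'true' logs and swallows IOException
*   Closeables.close(conn, true);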
*/ @@ -92,7 +93,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java index f5df30dbba2a..d4d0703c9c2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java @@ -29,7 +29,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -53,6 +52,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @RunWith(Parameterized.class) @Category({ LargeTests.class, ClientTests.class }) public class TestAsyncAdminBuilder { @@ -98,7 +99,7 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java index ab6515321c30..e52a2562a456 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.util.concurrent.TimeUnit; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.HMaster; @@ -37,6 +36,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + /** * Only used to test stopMaster/stopRegionServer/shutdown methods. 
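*
* (The 'true' passed to Closeables.close in tearDown below means an
* IOException raised on close is logged and suppressed rather than
* rethrown; pass false to propagate it. That keeps cleanup behavior
* equivalent to the old quiet close.)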
*/ @@ -71,7 +72,7 @@ public void setUp() throws Exception { @After @Override public void tearDown() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index 733787773aa0..150670706425 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.hbase.client.RegionReplicaTestHelper.testLocator; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionLocation; @@ -34,6 +33,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncMetaRegionLocator { @@ -60,7 +61,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(REGISTRY); + Closeables.close(REGISTRY, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index 6404a89671b8..99d98c92e7b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -35,7 +35,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; import java.util.stream.IntStream; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -63,6 +62,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) @RunWith(Parameterized.class) public class TestAsyncNonMetaRegionLocator { @@ -116,7 +117,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java index 88ab3ade2934..6aad76c37a47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java @@ -32,7 +32,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.IntStream; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -55,6 +54,8 
@@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncNonMetaRegionLocatorConcurrenyLimit { @@ -136,7 +137,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java index 03eac06a5710..bdc688e0d9fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java @@ -32,7 +32,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -56,6 +55,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncRegionLocator { @@ -107,7 +108,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java index bf8ce01752e5..58c6a04f1537 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java @@ -29,7 +29,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionLocation; @@ -44,6 +43,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncSingleRequestRpcRetryingCaller { @@ -79,7 +80,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java index c863ec12a96d..f76c923c77bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java @@ -41,7 +41,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import 
java.util.stream.IntStream; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -71,6 +70,8 @@ import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @RunWith(Parameterized.class) @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncTable { @@ -128,7 +129,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); assertTrue(ASYNC_CONN.isClosed()); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java index 6ccd9bc46f1c..ac647c693701 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -34,6 +33,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + /** * Fix an infinite loop in {@link AsyncNonMetaRegionLocator}, see the comments on HBASE-21943 for * more details. 
@@ -69,7 +70,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); assertTrue(ASYNC_CONN.isClosed()); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java index 10b358ff6253..82cc1a8b9616 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java @@ -26,7 +26,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -47,6 +46,8 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncTableNoncedRetry { @@ -114,7 +115,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java index 7e9f5d9270ee..f5aa19676909 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java @@ -25,7 +25,6 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.ForkJoinPool; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -45,6 +44,8 @@ import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @RunWith(Parameterized.class) @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncTableScanMetrics { @@ -105,7 +106,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java index 6b14286f99ca..a0b49b91a095 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -21,9 +21,9 @@ import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; + import java.io.IOException; import 
java.util.concurrent.TimeUnit; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -40,6 +40,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestCatalogReplicaLoadBalanceSimpleSelector { @@ -80,7 +82,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index 2bcddc9ea7f2..24e88234048d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -48,6 +47,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({SmallTests.class, MasterTests.class }) public class TestMetaRegionLocationCache { @ClassRule @@ -68,7 +69,7 @@ public static void setUp() throws Exception { @AfterClass public static void cleanUp() throws Exception { - IOUtils.closeQuietly(REGISTRY); + Closeables.close(REGISTRY, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index ac0e19355894..82cf0f9bbc97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -28,7 +28,6 @@ import java.io.IOException; import java.util.concurrent.ExecutionException; import java.util.stream.IntStream; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -48,6 +47,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestZKConnectionRegistry { @@ -69,7 +70,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(REGISTRY); + Closeables.close(REGISTRY, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index 01d482c095f5..ad08e3fa5567 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -33,7 +33,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CatalogFamilyFormat; import org.apache.hadoop.hbase.ClientMetaTableAccessor; @@ -107,7 +106,8 @@ public static void setupBeforeClass() throws Exception { } private static void resetConnections() throws IOException { - IOUtils.closeQuietly(ADMIN, CONNECTION); + Closeables.close(ADMIN, true); + Closeables.close(CONNECTION, true); CONNECTION = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); ADMIN = CONNECTION.getAdmin(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index c01edaa0467e..48ad276af597 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -30,7 +30,6 @@ import java.util.Set; import java.util.TreeSet; import java.util.stream.Collectors; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CatalogFamilyFormat; import org.apache.hadoop.hbase.ChoreService; @@ -306,24 +305,15 @@ void verifyRegionsUsingMetaTableAccessor() throws Exception { /** verify region boundaries obtained from HTable.getStartEndKeys() */ void verifyRegionsUsingHTable() throws IOException { - Table table = null; - try { - // HTable.getStartEndKeys() - table = connection.getTable(tableName); - - try (RegionLocator rl = connection.getRegionLocator(tableName)) { - Pair keys = rl.getStartEndKeys(); - verifyStartEndKeys(keys); + try (RegionLocator rl = connection.getRegionLocator(tableName)) { + Pair keys = rl.getStartEndKeys(); + verifyStartEndKeys(keys); - Set regions = new TreeSet<>(RegionInfo.COMPARATOR); - for (HRegionLocation loc : rl.getAllRegionLocations()) { - regions.add(loc.getRegion()); - } - verifyTableRegions(regions); + Set regions = new TreeSet<>(RegionInfo.COMPARATOR); + for (HRegionLocation loc : rl.getAllRegionLocations()) { + regions.add(loc.getRegion()); } - - } finally { - IOUtils.closeQuietly(table); + verifyTableRegions(regions); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 98e8c9ee2dc3..f11544f6a7ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -38,7 +38,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -116,6 +115,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; @@ -900,7 +900,7 @@ private HMaster abortAndWaitForMaster() throws IOException, InterruptedException HMaster master = cluster.startMaster().getMaster(); cluster.waitForActiveAndReadyMaster(); // reset the connections - IOUtils.closeQuietly(admin); + Closeables.close(admin, true); TESTING_UTIL.invalidateConnection(); admin = TESTING_UTIL.getAdmin(); return master; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 455b27298156..eca0d675cb7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -20,10 +20,10 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -53,8 +53,10 @@ import org.junit.BeforeClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; /** * This class is only a base for other integration-level replication tests. @@ -209,9 +211,9 @@ static void configureClusters(HBaseTestingUtility util1, conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false); } - static void restartSourceCluster(int numSlaves) - throws Exception { - IOUtils.closeQuietly(hbaseAdmin, htable1); + static void restartSourceCluster(int numSlaves) throws Exception { + Closeables.close(hbaseAdmin, true); + Closeables.close(htable1, true); UTIL1.shutdownMiniHBaseCluster(); UTIL1.restartHBaseCluster(numSlaves); // Invalidate the cached connection state. @@ -222,7 +224,7 @@ static void restartSourceCluster(int numSlaves) } static void restartTargetHBaseCluster(int numSlaves) throws Exception { - IOUtils.closeQuietly(htable2); + Closeables.close(htable2, true); UTIL2.restartHBaseCluster(numSlaves); // Invalidate the cached connection state CONF2 = UTIL2.getConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java index ee5276de7ee7..3a45c5bdb9ee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_GLOBAL; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -143,24 +142,28 @@ final void syncUp(HBaseTestingUtility util) throws Exception { // Utilities that manager shutdown / restart of source / sink clusters. They take care of // invalidating stale connections after shutdown / restarts. 
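// One wrinkle in the hunks below: Guava's Closeables.close takes a single
// Closeable, so each old varargs IOUtils.closeQuietly(a, b) call is unrolled
// into two explicit calls, e.g. (field names as in this class):
//   Closeables.close(ht1Source, true); // 'true': log and swallow IOException
//   Closeables.close(ht2Source, true);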
final void shutDownSourceHBaseCluster() throws Exception { - IOUtils.closeQuietly(ht1Source, ht2Source); + Closeables.close(ht1Source, true); + Closeables.close(ht2Source, true); UTIL1.shutdownMiniHBaseCluster(); } final void shutDownTargetHBaseCluster() throws Exception { - IOUtils.closeQuietly(ht1TargetAtPeer1, ht2TargetAtPeer1); + Closeables.close(ht1TargetAtPeer1, true); + Closeables.close(ht2TargetAtPeer1, true); UTIL2.shutdownMiniHBaseCluster(); } final void restartSourceHBaseCluster(int numServers) throws Exception { - IOUtils.closeQuietly(ht1Source, ht2Source); + Closeables.close(ht1Source, true); + Closeables.close(ht2Source, true); UTIL1.restartHBaseCluster(numServers); ht1Source = UTIL1.getConnection().getTable(TN1); ht2Source = UTIL1.getConnection().getTable(TN2); } final void restartTargetHBaseCluster(int numServers) throws Exception { - IOUtils.closeQuietly(ht1TargetAtPeer1, ht2TargetAtPeer1); + Closeables.close(ht1TargetAtPeer1, true); + Closeables.close(ht2TargetAtPeer1, true); UTIL2.restartHBaseCluster(numServers); ht1TargetAtPeer1 = UTIL2.getConnection().getTable(TN1); ht2TargetAtPeer1 = UTIL2.getConnection().getTable(TN2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java index 090129174cca..3ba26f321b03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java @@ -25,8 +25,6 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import java.util.concurrent.LinkedBlockingQueue; - -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -56,6 +54,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ReplicationTests.class, MediumTests.class }) public class TestSerialReplicationEndpoint { @@ -78,7 +77,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); UTIL.shutdownMiniCluster(); } diff --git a/pom.xml b/pom.xml index f23d0db549b9..749ab83842bb 100755 --- a/pom.xml +++ b/pom.xml @@ -1611,7 +1611,7 @@ 1.13 1.6 - 2.6 + 2.8.0 3.9 3.6.1 3.4.2 From 043da5f5eebd009a14780d12b4e70011b200b5bb Mon Sep 17 00:00:00 2001 From: leyangyueshan <15891721997@163.com> Date: Thu, 31 Dec 2020 22:37:31 +0800 Subject: [PATCH 294/769] HBASE-25435 Slow metric value can be configured (#2823) * HBASE-25435 Slow metric value can be configured * fix HBASE-25435 * hbase-25435 add blank * fix hbase-25435-2 Co-authored-by: stevenxi --- .../hbase/regionserver/MetricsRegionServer.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 6bbb620e1403..3bd787d10074 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -42,6 +42,7 @@ public class MetricsRegionServer { "hbase.regionserver.enable.table.latencies"; public static final boolean RS_ENABLE_TABLE_METRICS_DEFAULT = true; + public static final String SLOW_METRIC_TIME = "hbase.ipc.slow.metric.time"; private final MetricsRegionServerSource serverSource; private final MetricsRegionServerWrapper regionServerWrapper; private RegionServerTableMetrics tableMetrics; @@ -53,6 +54,8 @@ public class MetricsRegionServer { private Timer bulkLoadTimer; private Meter serverReadQueryMeter; private Meter serverWriteQueryMeter; + protected long slowMetricTime; + protected static final int DEFAULT_SLOW_METRIC_TIME = 1000; // milliseconds public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf, MetricsTable metricsTable) { @@ -68,6 +71,7 @@ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Confi // create and use metrics from the new hbase-metrics based registry. bulkLoadTimer = metricRegistry.timer("Bulkload"); + slowMetricTime = conf.getLong(SLOW_METRIC_TIME, DEFAULT_SLOW_METRIC_TIME); quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class); serverReadQueryMeter = metricRegistry.meter("ServerReadQueryPerSecond"); serverWriteQueryMeter = metricRegistry.meter("ServerWriteQueryPerSecond"); @@ -109,7 +113,7 @@ public void updatePutBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updatePutBatch(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowPut(); } serverSource.updatePutBatch(t); @@ -135,7 +139,7 @@ public void updateDeleteBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateDeleteBatch(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowDelete(); } serverSource.updateDeleteBatch(t); @@ -166,7 +170,7 @@ public void updateGet(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateGet(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowGet(); } serverSource.updateGet(t); @@ -177,7 +181,7 @@ public void updateIncrement(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateIncrement(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowIncrement(); } serverSource.updateIncrement(t); @@ -188,7 +192,7 @@ public void updateAppend(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateAppend(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowAppend(); } serverSource.updateAppend(t); From ec63cc3144d923e83e9fcd7a35d54ec80d4782aa Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Thu, 12 Nov 2020 16:52:58 -0500 Subject: [PATCH 295/769] HBASE-25279 Make ZKWatcher ExecutorService launch daemon threads Closes #2651 Signed-off-by: Duo Zhang Signed-off-by: Yulin Niu --- .../main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java index 136134d85dfc..7a9fdd689e21 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java @@ -97,7 +97,7 @@ public class ZKWatcher 
implements Watcher, Abortable, Closeable { // It is ok to do it in a single thread because the Zookeeper ClientCnxn already serializes the // requests using a single while loop and hence there is no performance degradation. private final ExecutorService zkEventProcessor = Executors.newSingleThreadExecutor( - new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d") + new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d").setDaemon(true) .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); private final Configuration conf; From 126d01dae301711ad65911c744ddc584549a33d2 Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Fri, 1 Jan 2021 14:47:34 +0800 Subject: [PATCH 296/769] HBASE-25456 : add security check for setRegionStateInMeta (#2833) Signed-off-by: Viraj Jasani --- .../hbase/master/MasterRpcServices.java | 1 + .../security/access/TestAccessController.java | 23 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index a11713276d77..8f2f0dad4b7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -2550,6 +2550,7 @@ public GetTableStateResponse setTableStateInMeta(RpcController controller, @Override public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller, SetRegionStateInMetaRequest request) throws ServiceException { + rpcPreCheck("setRegionStateInMeta"); SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder(); try { for (RegionSpecifierAndState s : request.getStatesList()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 7ab808cf1823..17276173ec70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -31,7 +31,10 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileStatus; @@ -88,6 +91,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.locking.LockProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; @@ -395,6 +399,25 @@ public void testUnauthorizedSetTableStateInMeta() throws Exception { USER_GROUP_WRITE, USER_GROUP_CREATE); } + @Test + public void testUnauthorizedSetRegionStateInMeta() throws Exception { + Admin admin = TEST_UTIL.getAdmin(); + final List regions = admin.getRegions(TEST_TABLE); + RegionInfo closeRegion = regions.get(0); + Map newStates = new HashMap<>(); + newStates.put(closeRegion.getEncodedName(), RegionState.State.CLOSED); + AccessTestAction action = () -> { + try(Connection conn = 
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()){ + hbck.setRegionStateInMeta(newStates); + } + return null; + }; + + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); + } + @Test public void testUnauthorizedFixMeta() throws Exception { AccessTestAction action = () -> { From 8b9a074bf74d2449a57bad71a1655499b4704d9d Mon Sep 17 00:00:00 2001 From: GeorryHuang <215175212@qq.com> Date: Sun, 3 Jan 2021 03:22:36 +0800 Subject: [PATCH 297/769] HBASE-24751 Display Task completion time and/or processing duration on Web UI (#2815) Signed-off-by: stack --- .../org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon index f700d3994732..8d090276a807 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon @@ -105,6 +105,7 @@ String parent = ""; Description State Status + Completion Time <%for MonitoredTask task : tasks %> @@ -116,6 +117,7 @@ String parent = ""; <% task.getStatus() %> (since <% StringUtils.formatTimeDiff(now, task.getStatusTime()) %> ago) + <% task.getCompletionTimestamp() < 0 ? task.getState() : new Date(task.getCompletionTimestamp()) %> From 1ca49ea50c8a2b552af9030b4e1bdcd27f618461 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Sat, 2 Jan 2021 11:32:47 -0800 Subject: [PATCH 298/769] =?UTF-8?q?HBASE-25438=20Update=20create-release?= =?UTF-8?q?=20mvn=20in=20Dockerfile;=20its=203.6.0;=20make=20=E2=80=A6=20(?= =?UTF-8?q?#2807)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../create-release/hbase-rm/Dockerfile | 19 ++++++++++++++++--- dev-support/create-release/release-build.sh | 10 ++++++---- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile index 26cb7e51abb3..ac443b64228d 100644 --- a/dev-support/create-release/hbase-rm/Dockerfile +++ b/dev-support/create-release/hbase-rm/Dockerfile @@ -21,6 +21,7 @@ # * Java 8 FROM ubuntu:18.04 + # Install extra needed repos and refresh. # # This is all in a single "RUN" command so that if anything changes, "apt update" is run to fetch @@ -33,7 +34,6 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ libcurl4-openssl-dev='7.58.0-*' \ libxml2-dev='2.9.4+dfsg1-*' \ lsof='4.89+dfsg-*' \ - maven='3.6.0-*' \ openjdk-8-jdk='8u*' \ python-pip='9.0.1-*' \ subversion='1.9.7-*' \ @@ -43,10 +43,23 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java \ && pip install \ python-dateutil==2.8.1 + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# Install mvn 3.6.3. 
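+# The block below pins the Maven tarball by version and verifies it against a
+# hard-coded SHA-512 before unpacking, so a tampered or silently rotated
+# mirror artifact fails the image build; MAVEN_VERSION and SHA must be bumped
+# together.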
+ARG MAVEN_VERSION=3.6.3 +ARG SHA=c35a1803a6e70a126e80b2b3ae33eed961f83ed74d18fcd16909b2d44d7dada3203f1ffe726c17ef8dcca2dcaa9fca676987befeadc9b9f759967a8cb77181c0 +ARG BASE_URL=https://apache.osuosl.org/maven/maven-3/${MAVEN_VERSION}/binaries +RUN mkdir -p /opt/maven \ + && curl -fsSL -o /tmp/apache-maven.tar.gz ${BASE_URL}/apache-maven-${MAVEN_VERSION}-bin.tar.gz \ + && echo "${SHA} /tmp/apache-maven.tar.gz" | sha512sum -c - \ + && tar -xzf /tmp/apache-maven.tar.gz -C /opt/maven --strip-components=1 \ + && rm -f /tmp/apache-maven.tar.gz \ + && ln -s /opt/maven/bin/mvn /usr/bin/mvn + # Install Apache Yetus ENV YETUS_VERSION 0.12.0 -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -RUN wget -qO- "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ +RUN curl "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ tar xvz -C /opt ENV YETUS_HOME /opt/apache-yetus-${YETUS_VERSION} diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index 44a594fff3d6..12cef1205bfc 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -136,19 +136,21 @@ if [[ "$1" == "tag" ]]; then git config user.name "$GIT_NAME" git config user.email "$GIT_EMAIL" + git config user.signingkey "${GPG_KEY}" # Create release version maven_set_version "$RELEASE_VERSION" + find . -name pom.xml -exec git add {} \; git add RELEASENOTES.md CHANGES.md - git commit -a -m "Preparing ${PROJECT} release $RELEASE_TAG; tagging and updates to CHANGES.md and RELEASENOTES.md" + git commit -s -m "Preparing ${PROJECT} release $RELEASE_TAG; tagging and updates to CHANGES.md and RELEASENOTES.md" log "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" - git tag "$RELEASE_TAG" + git tag -s -m "Via create-release" "$RELEASE_TAG" # Create next version maven_set_version "$NEXT_VERSION" - - git commit -a -m "Preparing development version $NEXT_VERSION" + find . -name pom.xml -exec git add {} \; + git commit -s -m "Preparing development version $NEXT_VERSION" if ! is_dry_run; then # Push changes From 2e21da7597d546fe15d1aafe253ee1917db63dda Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Sat, 2 Jan 2021 11:34:55 -0800 Subject: [PATCH 299/769] HBASE-25437 Update refguide RC section; point at the dev-tools/create-releases/README.txt rather than repeat its content (#2804) M dev-support/create-release/README.txt Remove redundant text. Add some extra help around figuring state of gpg-agent. M dev-support/create-release/do-release.sh Undo my mistaken commit where I undid test of gpg signing if under docker M dev-support/create-release/release-build.sh Handle '-h' M src/main/asciidoc/_chapters/developer.adoc Point to the README.txt under dev-tools/create-release rather than repeat the text in here. Be more insistent about using scripts. --- dev-support/create-release/README.txt | 71 +++--- dev-support/create-release/do-release.sh | 23 +- dev-support/create-release/release-build.sh | 4 + src/main/asciidoc/_chapters/developer.adoc | 248 ++++++-------------- 4 files changed, 118 insertions(+), 228 deletions(-) diff --git a/dev-support/create-release/README.txt b/dev-support/create-release/README.txt index aff562445e12..42959cd9da19 100644 --- a/dev-support/create-release/README.txt +++ b/dev-support/create-release/README.txt @@ -1,18 +1,10 @@ -Creates a HBase release candidate. 
The script will update versions, tag the branch, -build HBase binary packages and documentation, and upload maven artifacts to a staging -repository. There is also a dry run mode where only local builds are performed, and -nothing is uploaded to the ASF repos. - -Run with "-h" for options. For example, running below will do all -steps above using the 'rm' dir under Downloads as workspace: - - $ ./do-release-docker.sh -d ~/Downloads/rm +Creates an HBase release candidate. The scripts in this directory came originally from spark -(https://github.com/apache/spark/tree/master/dev/create-release). They were then -modified to suite the hbase context. These scripts supercedes the old -../make_rc.sh script for making release candidates because what is here is more -comprehensive doing more steps of the RM process as well as running in a +(https://github.com/apache/spark/tree/master/dev/create-release). They were +then modified to suit the hbase context. These scripts supercede the old +_../make_rc.sh_ script for making release candidates because what is here is +more comprehensive doing more steps of the RM process as well as running in a container so the RM build environment can be a constant. It: @@ -24,49 +16,52 @@ It: * Pushes release tgzs to the dev dir in a apache dist. * Pushes to repository.apache.org staging. -The entry point is here, in the do-release-docker.sh script. Requires a local -docker; for example, on mac os x, Docker for Desktop installed and running. +The entry point is the do-release-docker.sh script. It requires a local +docker; for example, on mac os x, a Docker for Desktop installed and running. - $ ./do-release-docker.sh -h +(To run a build w/o invoking docker (not recommended!), use _do_release.sh_.) -To run a build w/o invoking docker (not recommended!), use _do_release.sh_. - -Both scripts will query interactively for needed parameters and passphrases. +The scripts will query interactively for needed parameters and passphrases. For explanation of the parameters, execute: $ release-build.sh --help +The scripts run in dry-run mode by default where only local builds are +performed and nothing is uploaded to the ASF repos. Pass the '-f' flag +to remove dry-run mode. + Before starting the RC build, run a reconciliation of what is in JIRA with what is in the commit log. Make sure they align and that anomalies are -explained up in JIRA. - -See http://hbase.apache.org/book.html#maven.release +explained up in JIRA. See http://hbase.apache.org/book.html#maven.release +for how. Regardless of where your release build will run (locally, locally in docker, on a remote machine, etc) you will need a local gpg-agent with access to your -secret keys. A quick way to tell gpg to clear out state and start a gpg-agent -is via the following command phrase: - - $ gpgconf --kill all && gpg-connect-agent /bye - -Before starting an RC build, make sure your local gpg-agent has configs -to properly handle your credentials, especially if you want to avoid -typing the passphrase to your secret key. - -e.g. if you are going to run and step away, best to increase the TTL -on caching the unlocked secret via ~/.gnupg/gpg-agent.conf +secret keys. Before starting an RC build, make sure your local gpg-agent has +configs to properly handle your credentials, especially if you want to avoid +typing the passphrase to your secret key: e.g. 
if you are going to run +and step away (the RC creation takes ~5 hours), best to increase the TTL on +caching the unlocked secret by setting the following into local your +~/.gnupg/gpg-agent.conf file: # in seconds, e.g. a day default-cache-ttl 86400 max-cache-ttl 86400 +A quick way to tell gpg to clear out state, re-read the gpg-agent.conf file +and start a new gpg-agent is via the following command phrase: + + $ gpgconf --kill all && gpg-connect-agent /bye + +You can verify options took hold with '$ gpg --list-options gpg-agent'. + Similarly, run ssh-agent with your ssh key added if building with docker. Running a build on GCE is easy enough. Here are some notes if of use. -Create an instance. 4CPU/15G/10G disk seems to work well enough. +Create an instance. 4CPU/15G/20G disk seems to work well enough. Once up, run the below to make your machine fit for RC building: -# Presuming debian-compatible OS, do these steps on the VM -# your VM username should be your ASF id, because it will show up in build artifacts. +# Presuming debian-compatible OS, do these steps on the VM. +# Your VM username should be your ASF id, because it will show up in build artifacts. # Follow the docker install guide: https://docs.docker.com/engine/install/debian/ $ sudo apt-get install -y \ apt-transport-https \ @@ -129,7 +124,3 @@ $ git clone https://github.com/apache/hbase.git $ mkdir ~/build $ cd hbase $ ./dev-support/create-release/do-release-docker.sh -d ~/build - -# for building the main repo specifically you can save an extra download by pointing the build -# to the local clone you just made -$ ./dev-support/create-release/do-release-docker.sh -d ~/build -r .git diff --git a/dev-support/create-release/do-release.sh b/dev-support/create-release/do-release.sh index 5566b36c21e2..904d813fc3c6 100755 --- a/dev-support/create-release/do-release.sh +++ b/dev-support/create-release/do-release.sh @@ -88,20 +88,19 @@ if [ "$RUNNING_IN_DOCKER" = "1" ]; then else # Outside docker, need to ask for information about the release. get_release_info - - # Run this stuff when not in docker to check gpg. - gpg_test_file="${TMPDIR}/gpg_test.$$.txt" - echo "Testing gpg signing ${GPG} ${GPG_ARGS[@]} --detach --armor --sign ${gpg_test_file}" - echo "foo" > "${gpg_test_file}" - if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign "${gpg_test_file}" ; then - gpg_agent_help - fi - # In --batch mode we have to be explicit about what we are verifying - if ! "${GPG}" "${GPG_ARGS[@]}" --verify "${gpg_test_file}.asc" "${gpg_test_file}" ; then - gpg_agent_help - fi fi +# Check GPG +gpg_test_file="${TMPDIR}/gpg_test.$$.txt" +echo "Testing gpg signing ${GPG} ${GPG_ARGS[@]} --detach --armor --sign ${gpg_test_file}" +echo "foo" > "${gpg_test_file}" +if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign "${gpg_test_file}" ; then + gpg_agent_help +fi +# In --batch mode we have to be explicit about what we are verifying +if ! 
"${GPG}" "${GPG_ARGS[@]}" --verify "${gpg_test_file}.asc" "${gpg_test_file}" ; then + gpg_agent_help +fi GPG_TTY="$(tty)" export GPG_TTY diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index 12cef1205bfc..cb13110877f1 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -91,6 +91,10 @@ if [ $# -ne 1 ]; then exit_with_usage fi +if [[ "$1" == "-h" ]]; then + exit_with_usage +fi + if [[ "$*" == *"help"* ]]; then exit_with_usage fi diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index a6939920cb45..59d6b71f5fc6 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -625,54 +625,8 @@ upgrade protobuf later, then we don't have to add the profile '-Paarch64' anymor .Building against HBase 1.x [NOTE] ==== -HBase 1.x requires Java 7 to build. -See <> for Java requirements per HBase release. -==== - -[[maven.settings.xml]] -.Example _~/.m2/settings.xml_ File -==== -Publishing to maven requires you sign the artifacts you want to upload. -For the build to sign them for you, you a properly configured _settings.xml_ in your local repository under _.m2_, such as the following. - -[source,xml] ----- - - - - - apache.snapshots.https - YOUR_APACHE_ID - - YOUR_APACHE_PASSWORD - - - - - - apache.releases.https - YOUR_APACHE_ID - - YOUR_APACHE_PASSWORD - - - - - - apache-release - - YOUR_KEYNAME - - YOUR_KEY_PASSWORD - - - - - ----- +See old refguides for how to build HBase 1.x. +The below is for building hbase2. ==== [[maven.release]] @@ -699,30 +653,12 @@ target release will be included in the generated _CHANGES.md/RELEASENOTES.md_ files that ship with the release so make sure JIRA is correct before you begin. After doing the above, you can move to the manufacture of an RC. -Building an RC is involved. We've tried to script it. In the next section -we describe the script. It is followed by a description of the steps -involved which the script automates. - -[[do-release-docker.sh]] -==== Release Candidate Generating Script - -The _dev-support/create-release/do-release-docker.sh_ Release Candidate (RC) -Generating script is maintained in the master branch but can generate RCs -for any 2.x+ branch (The script does not work against branch-1). Check out -and update the master branch when making RCs. -The script builds in a Docker container to ensure we have a consistent -environment building. It will ask you for passwords for apache and for your -gpg signing key so it can sign and commit on your behalf. The passwords -are passed to gpg-agent in the container and purged along with the container -when the build is done. - -[NOTE] -==== -_dev-support/create-release/do-release-docker.sh_ supercedes the previous -_dev-support/make_rc.sh_ script. It is more comprehensive automating all -steps, rather than a portion, building a RC. -==== +Building an RC is involved so we've scripted it. The script builds in a Docker +container to ensure we have a consistent environment building. It will ask you +for passwords for apache and for your gpg signing key so it can sign and commit +on your behalf. The passwords are passed to gpg-agent in the container and +purged along with the container when the build is done. The script will: @@ -736,112 +672,25 @@ The script will: * Pushes to repository.apache.org staging. * Creates vote email template. 
-The RC building script is _dev-support/create-release/do-release-docker.sh_. -Pass _-h_ to _dev-support/create-release/do-release-docker.sh_ to -see available options: - -``` -Usage: do-release-docker.sh [options] - -This script runs the release scripts inside a docker image. - -Options: - - -d [path] required. working directory. output will be written to "output" in here. - -n dry run mode. Checks and local builds, but does not upload anything. - -t [tag] tag for the hbase-rm docker image to use for building (default: "latest"). - -j [path] path to local JDK installation to use building. By default the script will - use openjdk8 installed in the docker image. - -s [step] runs a single step of the process; valid steps are: tag, build, publish. if - none specified, runs tag, then build, and then publish. -``` - -Running the below command will do all steps above using the -'rm' working directory under Downloads as workspace: -``` - $ ./dev-support/create-release/do-release-docker.sh -d ~/Downloads/rm -``` - -The script will ask you a set of questions about the release version -and branch, the version to generate the compatibility report against, -and so on, before it starts executing (If you set the appropriate -environment variables, the script will skip asking you questions -- -which can come in handy if you end up having to re-run the script -multiple times). - -On branch 2.1, a Release Candidate (RC) creation can take many hours -(~8 hours) so run your build on a machine you know will be -around for this swath of time. Start the build inside a _screen_ -or _tmux_ session in case you become disconnected from your -build box. - -The build is made of three stages: tag, build, and -publish. If the script fails, you may resort to 'fixing' the -failure manually and then asking the script to run the -subsequent stage rather than start over. - -When the scripts run, they use the passed working directory. -Under the working directory is an _output_ dir. In here is -where the checkouts go, where we build up the _svn_ directory -to _svn_ commit to _apache/dist/dev_, etc. Each step also -dumps a log file in here: e.g. _tag.log_ for the tagging -step and _build.log_ for building. - -The _tagging_ step will checkout hbase, set the version number -in all the poms – e.g. if branch-2.0 is at 2.0.6-SNAPSHOT -and you are making a 2.0.5 RC, it will set the versions in -all poms to 2.0.5 – appropriately. It then generate CHANGES.md -and RELEASENOTES.md by checking out yetus and then -calling its generator scripts. It then commits the poms with -their new versions along with the changed CHANGES.md and -RELEASENOTES.md, tags, and pushes up all changes to the -apache repo. - -The _build_ step will checkout hbase, build all including -javadoc and doc (javadoc takes the bulk of the time – 4 hours plus), -run assemblies to produce src and bin tarballs, sign and hash it -all, and then make a dir under apache dist dev named for the RC. -It will copy all artifacts in here including top-level CHANGES.md -and RELEASENOTES.md. It will generate api diff docs and put them -into this RC dir too. When done, it commits the svn RC. - -The publish step will checkout hbase, build, and then copy up all -artifacts to repository.apache.org (signed and hashed). When done, -it will dump out an email template with all the correct links in place. - -Check the artifacts pushed to the dev distribution directory and up -in repository.apache.org. If all looks good, check the generated -email and send to the dev list. 
-
-Under the create-release dir, scripts should make some sense:
-```
-do-release-docker.sh  # Main entrance.
-do-release.sh .       # More checks. Not usable really other than by setting env variables before running it.
-release-tag.sh        # Does tagging steps.
-release-build.sh .    # Does the build and publish step.
-release-util.sh       # Utility used by all of the above.
-vote.tmpl             # Template for email to send out.
-hbase-rm              # Has docker image we use.
-```
-
-If the RC fails, the script will do the right thing when it comes
-to edit of the _CHANGES.md_ and _RELEASENOTES.md_ removing the old
-and updating the files with the updated content (No harm verifying
-though).
-
-One trick for checking stuff especially in utility is to do as follows:
-
-```
-$ source release-util.sh ; generate_api_report ../../ rel/2.1.3 2.14RC1
-```
-
-i.e. source the release-util.sh script and then run one of its functions
-passing args. Helped debugging stuff.
-
-[[rc_procedure]]
+The _dev-support/create-release/do-release-docker.sh_ Release Candidate (RC)
+Generating script is maintained in the master branch but can generate RCs
+for any 2.x+ branch (The script does not work against branch-1). Check out
+and update the master branch when making RCs. See
+_dev-support/create-release/README.txt_ for how to configure your
+environment and run the script.
+
+[NOTE]
+====
+_dev-support/create-release/do-release-docker.sh_ supercedes the previous
+_dev-support/make_rc.sh_ script. It is more comprehensive automating all
+steps, rather than a portion, building a RC.
+====
+
 ==== Release Candidate Procedure

-Here we describe the steps involved generating a Release Candidate, the steps
-automated by the script described in the previous section.
+Here we outline the steps involved generating a Release Candidate, the steps
+automated by the _dev-support/create-release/do-release-docker.sh_ script
+described in the previous section. Running these steps manually tends to
+be error-prone so is not recommended. The below is informational only.

 The process below makes use of various tools, mainly _git_ and _maven_.

@@ -859,6 +708,53 @@ MAVEN_OPTS="-Xmx4g -XX:MaxPermSize=256m" mvn package
 You could also set this in an environment variable or alias in your shell.
 ====

+[[maven.settings.xml]]
+.Example _~/.m2/settings.xml_ File
+====
+Publishing to maven requires you sign the artifacts you want to upload.
+For the build to sign them for you, you need a properly configured _settings.xml_
+in your local repository under _.m2_, such as the following.
+
+[source,xml]
+----
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+            http://maven.apache.org/xsd/settings-1.0.0.xsd">
+  <servers>
+    <server>
+      <id>apache.snapshots.https</id>
+      <username>YOUR_APACHE_ID</username>
+      <password>YOUR_APACHE_PASSWORD</password>
+    </server>
+    <server>
+      <id>apache.releases.https</id>
+      <username>YOUR_APACHE_ID</username>
+      <password>YOUR_APACHE_PASSWORD</password>
+    </server>
+  </servers>
+  <profiles>
+    <profile>
+      <id>apache-release</id>
+      <properties>
+        <gpg.keyname>YOUR_KEYNAME</gpg.keyname>
+        <gpg.passphrase>YOUR_KEY_PASSWORD</gpg.passphrase>
+      </properties>
+    </profile>
+  </profiles>
+</settings>
+----
+====
+
 ===== Update the _CHANGES.md_ and _RELEASENOTES.md_ files and the POM files.

 Update _CHANGES.md_ with the changes since the last release.
Be careful with where you put

From eef076828a0954b51fdae558b29351d1d6afe680 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=94=B3=E8=83=9C=E5=88=A9?= <48829688+shenshengli@users.noreply.github.com>
Date: Mon, 4 Jan 2021 01:27:08 +0800
Subject: [PATCH 300/769] Shenshengli hbase-25450 The parameter "hbase.bucketcache.size" is misdescribed (#2821)

Signed-off-by: Anoop Sam John
Signed-off-by: stack
---
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java    | 4 +---
 hbase-common/src/main/resources/hbase-default.xml            | 4 +---
 .../java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java | 2 +-
 src/main/asciidoc/_chapters/hbase-default.adoc               | 1 +
 4 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 05782fc5518c..9a6912a49ea9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1400,9 +1400,7 @@ public enum OperationStatusCode {
   public static final String BUCKET_CACHE_IOENGINE_KEY = "hbase.bucketcache.ioengine";

   /**
-   * When using bucket cache, this is a float that EITHER represents a percentage of total heap
-   * memory size to give to the cache (if < 1.0) OR, it is the capacity in
-   * megabytes of the cache.
+   * When using bucket cache, it is the capacity in megabytes of the cache.
    */
   public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size";

diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index def502a62cfc..9092dd147198 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1033,9 +1033,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>hbase.bucketcache.size</name>
-    <description>A float that EITHER represents a percentage of total heap memory
-      size to give to the cache (if < 1.0) OR, it is the total capacity in
-      megabytes of BucketCache. Default: 0.0</description>
+    <description>It is the total capacity in megabytes of BucketCache. Default: 0.0</description>
   </property>
   <property>
     <name>hbase.bucketcache.bucket.sizes</name>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
index 471eb469b7e5..910498040e07 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
@@ -234,7 +234,7 @@ public static long getOnHeapCacheSize(final Configuration conf) {
   }

   /**
-   * @param conf used to read config for bucket cache size. (< 1 is treated as % and > is treated as MiB)
+   * @param conf used to read config for bucket cache size.
    * @return the number of bytes to use for bucket cache, negative if disabled.
    */
   public static long getBucketCacheSize(final Configuration conf) {
diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc
index 8cbc2dc4f95e..32dfb1650916 100644
--- a/src/main/asciidoc/_chapters/hbase-default.adoc
+++ b/src/main/asciidoc/_chapters/hbase-default.adoc
@@ -1242,6 +1242,7 @@ Whether or not the bucketcache is used in league with the LRU
 .Description
 Used along with bucket cache, this is a float that EITHER represents a percentage of total heap memory size to give to the cache (if < 1.0) OR, it is the capacity in megabytes of the cache.
+ (After HBase-2.0, "hbase.bucketcache.size" cannot be between 0-1) + .Default `0` when specified as a float From 8ecfcede962015bfce615d602e7e28327d41cdb6 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Mon, 4 Jan 2021 01:34:00 +0800 Subject: [PATCH 301/769] HBASE-25439 Add BYTE unit in PrettyPrinter.Unit (#2812) Signed-off-by: stack --- .../hbase/client/TableDescriptorBuilder.java | 37 +++++- .../client/TestTableDescriptorBuilder.java | 64 ++++++++- .../org/apache/hadoop/hbase/HConstants.java | 8 ++ .../hadoop/hbase/util/PrettyPrinter.java | 122 ++++++++++++++++++ hbase-shell/src/main/ruby/hbase/admin.rb | 4 +- 5 files changed, 230 insertions(+), 5 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index d2cfff59f31f..fd466654ea4e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -41,8 +41,10 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.PrettyPrinter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -246,6 +248,15 @@ public class TableDescriptorBuilder { RESERVED_KEYWORDS.add(IS_META_KEY); } + public static PrettyPrinter.Unit getUnit(String key) { + switch (key) { + case MAX_FILESIZE: + return PrettyPrinter.Unit.BYTE; + default: + return PrettyPrinter.Unit.NONE; + } + } + /** * @deprecated namespace table has been folded into the ns family in meta table, do not use this * any more. @@ -458,11 +469,22 @@ public TableDescriptorBuilder setMaxFileSize(long maxFileSize) { return this; } + public TableDescriptorBuilder setMaxFileSize(String maxFileSize) throws HBaseException { + desc.setMaxFileSize(maxFileSize); + return this; + } + public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) { desc.setMemStoreFlushSize(memstoreFlushSize); return this; } + public TableDescriptorBuilder setMemStoreFlushSize(String memStoreFlushSize) + throws HBaseException { + desc.setMemStoreFlushSize(memStoreFlushSize); + return this; + } + public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) { desc.setNormalizerTargetRegionCount(regionCount); return this; @@ -982,6 +1004,11 @@ public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) { return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize)); } + public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException { + return setMaxFileSize(Long.parseLong(PrettyPrinter. + valueOf(maxFileSize, PrettyPrinter.Unit.BYTE))); + } + /** * Returns the size of the memstore after which a flush to filesystem is * triggered. 
@@ -1007,6 +1034,12 @@ public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) { return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize)); } + public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize) + throws HBaseException { + return setMemStoreFlushSize(Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, + PrettyPrinter.Unit.BYTE))); + } + /** * This sets the class associated with the flush policy which determines * determines the stores need to be flushed when flushing a region. The @@ -1169,7 +1202,7 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForAttr = true; s.append(key); s.append(" => "); - s.append('\'').append(value).append('\''); + s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); } if (!userKeys.isEmpty()) { @@ -1189,7 +1222,7 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForCfg = true; s.append('\'').append(key).append('\''); s.append(" => "); - s.append('\'').append(value).append('\''); + s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); } s.append("}"); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index c29c83502edd..425d59022ab0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -218,6 +219,33 @@ public void testGetMaxFileSize() { assertEquals(1111L, desc.getMaxFileSize()); } + @Test + public void testSetMaxFileSize() throws HBaseException { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + + String maxFileSize = "1073741824"; + builder.setMaxFileSize(maxFileSize); + assertEquals(1073741824, builder.build().getMaxFileSize()); + + maxFileSize = "1GB"; + builder.setMaxFileSize(maxFileSize); + assertEquals(1073741824, builder.build().getMaxFileSize()); + + maxFileSize = "10GB 25MB"; + builder.setMaxFileSize(maxFileSize); + assertEquals(10763632640L, builder.build().getMaxFileSize()); + + // ignore case + maxFileSize = "10GB 512mb 512KB 512b"; + builder.setMaxFileSize(maxFileSize); + assertEquals(11274813952L, builder.build().getMaxFileSize()); + + maxFileSize = "10737942528 B (10GB 512KB)"; + builder.setMaxFileSize(maxFileSize); + assertEquals(10737942528L, builder.build().getMaxFileSize()); + } + /** * Test default value handling for memStoreFlushSize */ @@ -231,6 +259,33 @@ public void testGetMemStoreFlushSize() { assertEquals(1111L, desc.getMemStoreFlushSize()); } + @Test + public void testSetMemStoreFlushSize() throws HBaseException { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + + String memstoreFlushSize = "1073741824"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(1073741824, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "1GB"; 
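+    // Unit suffixes are binary: "1GB" parses to 1 * 1024 * 1024 * 1024 = 1073741824 bytes.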
+ builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(1073741824, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "10GB 25MB"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(10763632640L, builder.build().getMemStoreFlushSize()); + + // ignore case + memstoreFlushSize = "10GB 512mb 512KB 512b"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(11274813952L, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "10737942528 B (10GB 512KB)"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(10737942528L, builder.build().getMemStoreFlushSize()); + } + @Test public void testClassMethodsAreBuilderStyle() { BuilderStyleTest.assertClassesAreBuilderStyle(TableDescriptorBuilder.class); @@ -281,7 +336,7 @@ public void testPriority() { } @Test - public void testStringCustomizedValues() { + public void testStringCustomizedValues() throws HBaseException { byte[] familyName = Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(1000).build(); @@ -292,6 +347,13 @@ public void testStringCustomizedValues() { "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", htd.toStringCustomizedValues()); + + htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528").build(); + assertEquals( + "'testStringCustomizedValues', " + + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + + "MAX_FILESIZE => '10737942528 B (10GB 512KB)'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", + htd.toStringCustomizedValues()); } @Test diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 9a6912a49ea9..d31cadd85299 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -700,6 +700,14 @@ public enum OperationStatusCode { public static final int HOUR_IN_SECONDS = 60 * 60; public static final int MINUTE_IN_SECONDS = 60; + /** + * KB, MB, GB, TB equivalent to how many bytes + */ + public static final long KB_IN_BYTES = 1024; + public static final long MB_IN_BYTES = 1024 * KB_IN_BYTES; + public static final long GB_IN_BYTES = 1024 * MB_IN_BYTES; + public static final long TB_IN_BYTES = 1024 * GB_IN_BYTES; + //TODO: although the following are referenced widely to format strings for // the shell. They really aren't a part of the public API. 
It would be // nice if we could put them somewhere where they did not need to be diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java index ff7064b11430..83eb01a635fd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java @@ -41,11 +41,17 @@ public final class PrettyPrinter { "((\\d+)\\s*MINUTES?)?\\s*((\\d+)\\s*SECONDS?)?\\s*\\)?"; private static final Pattern INTERVAL_PATTERN = Pattern.compile(INTERVAL_REGEX, Pattern.CASE_INSENSITIVE); + private static final String SIZE_REGEX = "((\\d+)\\s*B?\\s*\\()?\\s*" + + "((\\d+)\\s*TB?)?\\s*((\\d+)\\s*GB?)?\\s*" + + "((\\d+)\\s*MB?)?\\s*((\\d+)\\s*KB?)?\\s*((\\d+)\\s*B?)?\\s*\\)?"; + private static final Pattern SIZE_PATTERN = Pattern.compile(SIZE_REGEX, + Pattern.CASE_INSENSITIVE); public enum Unit { TIME_INTERVAL, LONG, BOOLEAN, + BYTE, NONE } @@ -63,6 +69,9 @@ public static String format(final String value, final Unit unit) { byte[] booleanBytes = Bytes.toBytesBinary(value); human.append(String.valueOf(Bytes.toBoolean(booleanBytes))); break; + case BYTE: + human.append(humanReadableByte(Long.parseLong(value))); + break; default: human.append(value); } @@ -82,6 +91,9 @@ public static String valueOf(final String pretty, final Unit unit) throws HBaseE case TIME_INTERVAL: value.append(humanReadableIntervalToSec(pretty)); break; + case BYTE: + value.append(humanReadableSizeToBytes(pretty)); + break; default: value.append(pretty); } @@ -191,6 +203,116 @@ private static long humanReadableIntervalToSec(final String humanReadableInterva return ttl; } + /** + * Convert a long size to a human readable string. + * Example: 10763632640 -> 10763632640 B (10GB 25MB) + * @param size the size in bytes + * @return human readable string + */ + private static String humanReadableByte(final long size) { + StringBuilder sb = new StringBuilder(); + long tb, gb, mb, kb, b; + + if (size < HConstants.KB_IN_BYTES) { + sb.append(size); + sb.append(" B"); + return sb.toString(); + } + + tb = size / HConstants.TB_IN_BYTES; + gb = (size - HConstants.TB_IN_BYTES * tb) / HConstants.GB_IN_BYTES; + mb = (size - HConstants.TB_IN_BYTES * tb + - HConstants.GB_IN_BYTES * gb) / HConstants.MB_IN_BYTES; + kb = (size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb + - HConstants.MB_IN_BYTES * mb) / HConstants.KB_IN_BYTES; + b = (size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb + - HConstants.MB_IN_BYTES * mb - HConstants.KB_IN_BYTES * kb); + + sb.append(size).append(" B ("); + if (tb > 0) { + sb.append(tb); + sb.append("TB"); + } + + if (gb > 0) { + sb.append(tb > 0 ? " " : ""); + sb.append(gb); + sb.append("GB"); + } + + if (mb > 0) { + sb.append(tb + gb > 0 ? " " : ""); + sb.append(mb); + sb.append("MB"); + } + + if (kb > 0) { + sb.append(tb + gb + mb > 0 ? " " : ""); + sb.append(kb); + sb.append("KB"); + } + + if (b > 0) { + sb.append(tb + gb + mb + kb > 0 ? " " : ""); + sb.append(b); + sb.append("B"); + } + + sb.append(")"); + return sb.toString(); + } + + /** + * Convert a human readable size to bytes. + * Examples of the human readable size are: 50 GB 20 MB 1 KB , 25000 B etc. + * The units of size specified can be in uppercase as well as lowercase. Also, if a + * single number is specified without any time unit, it is assumed to be in bytes. 
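+ * For example, "1GB 512MB" is parsed as 1073741824 + 536870912 = 1610612736 bytes.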
+ * @param humanReadableSize human readable size
+ * @return value in bytes
+ * @throws HBaseException
+ */
+ private static long humanReadableSizeToBytes(final String humanReadableSize)
+   throws HBaseException {
+   if (humanReadableSize == null) {
+     return -1;
+   }
+
+   try {
+     return Long.parseLong(humanReadableSize);
+   } catch(NumberFormatException ex) {
+     LOG.debug("Given size value is not a number, parsing for human readable format");
+   }
+
+   String tb = null;
+   String gb = null;
+   String mb = null;
+   String kb = null;
+   String b = null;
+   String expectedSize = null;
+   long size = 0;
+
+   Matcher matcher = PrettyPrinter.SIZE_PATTERN.matcher(humanReadableSize);
+   if (matcher.matches()) {
+     expectedSize = matcher.group(2);
+     tb = matcher.group(4);
+     gb = matcher.group(6);
+     mb = matcher.group(8);
+     kb = matcher.group(10);
+     b = matcher.group(12);
+   }
+   size += tb != null ? Long.parseLong(tb)*HConstants.TB_IN_BYTES:0;
+   size += gb != null ? Long.parseLong(gb)*HConstants.GB_IN_BYTES:0;
+   size += mb != null ? Long.parseLong(mb)*HConstants.MB_IN_BYTES:0;
+   size += kb != null ? Long.parseLong(kb)*HConstants.KB_IN_BYTES:0;
+   size += b != null ? Long.parseLong(b):0;
+
+   if (expectedSize != null && Long.parseLong(expectedSize) != size) {
+     throw new HBaseException("Malformed size string: values in byte and human readable " +
+       "format do not match");
+   }
+   return size;
+ }
+
 /**
  * Pretty prints a collection of any type to a string. Relies on toString() implementation of the
  * object type.
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index ee54ae7af2b2..6228ad78486d 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1472,7 +1472,7 @@ def list_locks
   # Parse arguments and update TableDescriptorBuilder accordingly
   # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
   def update_tdb_from_arg(tdb, arg)
-    tdb.setMaxFileSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MAX_FILESIZE))) if arg.include?(TableDescriptorBuilder::MAX_FILESIZE)
+    tdb.setMaxFileSize(arg.delete(TableDescriptorBuilder::MAX_FILESIZE)) if arg.include?(TableDescriptorBuilder::MAX_FILESIZE)
     tdb.setReadOnly(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::READONLY))) if arg.include?(TableDescriptorBuilder::READONLY)
     tdb.setCompactionEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::COMPACTION_ENABLED))) if arg.include?(TableDescriptorBuilder::COMPACTION_ENABLED)
     tdb.setSplitEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::SPLIT_ENABLED))) if arg.include?(TableDescriptorBuilder::SPLIT_ENABLED)
@@ -1480,7 +1480,7 @@ def update_tdb_from_arg(tdb, arg)
     tdb.setNormalizationEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZATION_ENABLED))) if arg.include?(TableDescriptorBuilder::NORMALIZATION_ENABLED)
     tdb.setNormalizerTargetRegionCount(JInteger.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_COUNT))) if arg.include?(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_COUNT)
     tdb.setNormalizerTargetRegionSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_SIZE))) if arg.include?(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_SIZE)
-    tdb.setMemStoreFlushSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE))) if arg.include?(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE)
+    tdb.setMemStoreFlushSize(arg.delete(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE)) if arg.include?(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE)
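+    # Both setters above now take the raw string, so human readable sizes such
+    # as '10GB 25MB' can be parsed by PrettyPrinter on the Java side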
    tdb.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(TableDescriptorBuilder::DURABILITY))) if arg.include?(TableDescriptorBuilder::DURABILITY)
    tdb.setPriority(JInteger.valueOf(arg.delete(TableDescriptorBuilder::PRIORITY))) if arg.include?(TableDescriptorBuilder::PRIORITY)
    tdb.setFlushPolicyClassName(arg.delete(TableDescriptorBuilder::FLUSH_POLICY)) if arg.include?(TableDescriptorBuilder::FLUSH_POLICY)

From 8a0b023023554ba0d65c72f57b242d84bfe2b132 Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Mon, 4 Jan 2021 23:30:32 +0800
Subject: [PATCH 302/769] =?UTF-8?q?HBASE-25457=20Possible=20race=20in=20As?=
 =?UTF-8?q?yncConnectionImpl=20between=20getChoreServ=E2=80=A6=20(#2839)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Viraj Jasani
---
 .../hadoop/hbase/client/AsyncConnectionImpl.java | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 1dbb7e6d211a..8a1ac5aac76d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -28,8 +28,6 @@
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

 import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
@@ -56,11 +54,11 @@
 import org.apache.hadoop.hbase.util.ConcurrentMapUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;

 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
@@ -187,6 +185,9 @@ private void spawnRenewalChore(final UserGroupInformation user) {
   * @return ChoreService
   */
  synchronized ChoreService getChoreService() {
+   if (isClosed()) {
+     throw new IllegalStateException("connection is already closed");
+   }
    if (choreService == null) {
      choreService = new ChoreService("AsyncConn Chore Service");
    }
@@ -216,8 +217,11 @@ public void close() {
      e -> LOG.warn("failed to close clusterStatusListener", e));
    IOUtils.closeQuietly(rpcClient, e -> LOG.warn("failed to close rpcClient", e));
    IOUtils.closeQuietly(registry, e -> LOG.warn("failed to close registry", e));
-   if (choreService != null) {
-     choreService.shutdown();
+   synchronized (this) {
+     if (choreService != null) {
+       choreService.shutdown();
+       choreService = null;
+     }
    }
    metrics.ifPresent(MetricsConnection::shutdown);
    ConnectionOverAsyncConnection c = this.conn;

From 600be60a4bd4d3b3e9652027a0cb8bdd32016c6b Mon Sep 17 00:00:00 2001
From: Bo Cui
Date: Mon, 4 Jan 2021 23:34:38 +0800
Subject: [PATCH 303/769] HBASE-25447 remoteProc is suspended due to OOM ERROR (#2824)

Some OOMEs do not cause the JVM to exit, like "java.lang.OutOfMemoryError:
Direct buffer memory", "java.lang.OutOfMemoryError: unable to create new
native thread", as they don't call
vmError#next_OnError_command. So abort HMaster when uncaught exception occurs in TimeoutExecutor, the new active Hmaster will resume the suspended procedure. Signed-off-by: Duo Zhang Signed-off-by: stack Signed-off-by: Pankaj Kumar --- .../hbase/procedure2/RemoteProcedureDispatcher.java | 4 ++++ .../hbase/master/procedure/RSProcedureDispatcher.java | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 611fc86f9a3c..a060f14ccf9a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -106,6 +106,10 @@ public boolean start() { return true; } + protected void setTimeoutExecutorUncaughtExceptionHandler(UncaughtExceptionHandler eh) { + timeoutExecutor.setUncaughtExceptionHandler(eh); + } + public boolean stop() { if (!running.getAndSet(false)) { return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index 2f990cb0b064..d028bb40321b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -94,6 +94,7 @@ public boolean start() { if (!super.start()) { return false; } + setTimeoutExecutorUncaughtExceptionHandler(this::abort); if (master.isStopped()) { LOG.debug("Stopped"); return false; @@ -126,6 +127,13 @@ public boolean start() { return true; } + private void abort(Thread t, Throwable e) { + LOG.error("Caught error", e); + if (!master.isStopped() && !master.isStopping() && !master.isAborted()) { + master.abort("Aborting master", e); + } + } + @Override public boolean stop() { if (!super.stop()) { From 481662ab39f8803849001b002bc5a8470f667d1e Mon Sep 17 00:00:00 2001 From: Mate Szalay-Beko Date: Tue, 5 Jan 2021 09:24:24 +0100 Subject: [PATCH 304/769] HBASE-25318 Config option for IntegrationTestImportTsv where to generate HFiles to bulkload (#2777) IntegrationTestImportTsv is generating HFiles under the working directory of the current hdfs user executing the tool, before bulkloading it into HBase. Assuming you encrypt the HBase root directory within HDFS (using HDFS Transparent Encryption), you can bulkload HFiles only if they sit in the same encryption zone in HDFS as the HBase root directory itself. When IntegrationTestImportTsv is executed against a real distributed cluster and the working directory of the current user (e.g. /user/hbase) is not in the same encryption zone as the HBase root directory (e.g. /hbase/data) then you will get an exception: ``` ERROR org.apache.hadoop.hbase.regionserver.HRegion: There was a partial failure due to IO when attempting to load d : hdfs://mycluster/user/hbase/test-data/22d8460d-04cc-e032-88ca-2cc20a7dd01c/ IntegrationTestImportTsv/hfiles/d/74655e3f8da142cb94bc31b64f0475cc org.apache.hadoop.ipc.RemoteException(java.io.IOException): /user/hbase/test-data/22d8460d-04cc-e032-88ca-2cc20a7dd01c/ IntegrationTestImportTsv/hfiles/d/74655e3f8da142cb94bc31b64f0475cc can't be moved into an encryption zone. 
```

In this commit I make it configurable where the IntegrationTestImportTsv
generates the HFiles.

Co-authored-by: Mate Szalay-Beko
Signed-off-by: Peter Somogyi
---
 .../mapreduce/IntegrationTestImportTsv.java   | 36 +++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)

diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
index c80d61c4ea66..28b4ae467dda 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
@@ -22,6 +22,7 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;

+import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -29,6 +30,7 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.UUID;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -66,6 +68,8 @@ public class IntegrationTestImportTsv extends Configured implements Tool {

   private static final String NAME = IntegrationTestImportTsv.class.getSimpleName();
   private static final Logger LOG = LoggerFactory.getLogger(IntegrationTestImportTsv.class);
+  private static final String GENERATED_HFILE_FOLDER_PARAM_KEY =
+    "IntegrationTestImportTsv.generatedHFileFolder";

   protected static final String simple_tsv =
     "row1\t1\tc1\tc2\n" +
@@ -190,8 +194,8 @@ public void testGenerateAndLoad() throws Exception {
   void generateAndLoad(final TableName table) throws Exception {
     LOG.info("Running test testGenerateAndLoad.");
     String cf = "d";
-    Path hfiles = new Path(
-      util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles");
+    Path hfiles = initGeneratedHFilePath(table);
+    LOG.info("The folder where the HFiles will be generated: {}", hfiles.toString());

     Map<String, String> args = new HashMap<>();
     args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString());
@@ -220,6 +224,12 @@ public int run(String[] args) throws Exception {
       System.err.println(format("%s [genericOptions]", NAME));
       System.err.println(" Runs ImportTsv integration tests against a distributed cluster.");
       System.err.println();
+      System.err.println(" Use '-D" + GENERATED_HFILE_FOLDER_PARAM_KEY + "=' to define a");
+      System.err.println(" base folder for the generated HFiles. If HDFS Transparent Encryption");
+      System.err.println(" is configured, then make sure to set this parameter to a folder in");
+      System.err.println(" the same encryption zone in HDFS as the HBase root directory,");
+      System.err.println(" otherwise the bulkload will fail.");
+      System.err.println();
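+      // Hypothetical example value, assuming the target folder shares an
+      // encryption zone with the HBase root directory:
+      //   -DIntegrationTestImportTsv.generatedHFileFolder=/hbase/staging-hfiles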
      ToolRunner.printGenericCommandUsage(System.err);
      return 1;
    }
@@ -237,6 +247,28 @@ public int run(String[] args) throws Exception {
     return 0;
   }

+  private Path initGeneratedHFilePath(final TableName table) throws IOException {
+    String folderParam = getConf().getTrimmed(GENERATED_HFILE_FOLDER_PARAM_KEY);
+    if (folderParam == null || folderParam.isEmpty()) {
+      // by default, fall back to the test data dir
+      return new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles");
+    }
+
+    Path hfiles = new Path(folderParam, UUID.randomUUID().toString());
+    FileSystem fs = util.getTestFileSystem();
+    String shouldPreserve = System.getProperty("hbase.testing.preserve.testdir", "false");
+    if (!Boolean.parseBoolean(shouldPreserve)) {
+      if (fs.getUri().getScheme().equals(FileSystem.getLocal(getConf()).getUri().getScheme())) {
+        File localFolder = new File(hfiles.toString());
+        localFolder.deleteOnExit();
+      } else {
+        fs.deleteOnExit(hfiles);
+      }
+    }
+    return hfiles;
+  }
+
+
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);

From 85842634e518155db3c964bf15555291d5fbdd45 Mon Sep 17 00:00:00 2001
From: Wellington Ramos Chevreuil
Date: Tue, 5 Jan 2021 10:21:26 +0000
Subject: [PATCH 305/769] =?UTF-8?q?HBASE-24813=20ReplicationSource=20shoul?=
 =?UTF-8?q?d=20clear=20buffer=20usage=20on=20Replicatio=E2=80=A6=20(#2546)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Ankit Singhal
---
 .../regionserver/ReplicationSource.java       |  5 ++
 .../ReplicationSourceShipper.java             | 54 +++++++++++++++++++
 .../ReplicationSourceWALReader.java           |  3 +-
 .../regionserver/TestReplicationSource.java   | 54 +++++++++++++++++--
 4 files changed, 112 insertions(+), 4 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 423ec0e0005e..317db6628f59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -686,6 +686,7 @@ public void terminate(String reason, Exception cause, boolean clearMetrics,
       Threads.shutdown(initThread, this.sleepForRetries);
     }
     Collection<ReplicationSourceShipper> workers = workerThreads.values();
+
     for (ReplicationSourceShipper worker : workers) {
       worker.stopWorker();
       if(worker.entryReader != null) {
@@ -696,6 +697,7 @@
     if (this.replicationEndpoint != null) {
       this.replicationEndpoint.stop();
     }
+
     for (ReplicationSourceShipper worker : workers) {
       if (worker.isAlive() || worker.entryReader.isAlive()) {
         try {
@@ -714,6 +716,9 @@
           worker.entryReader.interrupt();
         }
       }
+      //If worker is already stopped but there was still entries batched,
+      //we need to clear buffer used for non processed entries
+      worker.clearWALEntryBatch();
     }

     if (join) {
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index d3af995d6d9c..78bf42fb9045 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.util.List; import java.util.concurrent.PriorityBlockingQueue; +import java.util.concurrent.atomic.LongAccumulator; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -323,4 +325,56 @@ void stopWorker() { public boolean isFinished() { return state == WorkerState.FINISHED; } + + /** + * Attempts to properly update ReplicationSourceManager.totalBufferUser, + * in case there were unprocessed entries batched by the reader to the shipper, + * but the shipper didn't manage to ship those because the replication source is being terminated. + * In that case, it iterates through the batched entries and decrease the pending + * entries size from ReplicationSourceManager.totalBufferUser + *
 + * <p/>
    + * <b>NOTES</b>
+ * 1) This method should only be called upon replication source termination.
+ * It blocks waiting for both shipper and reader threads termination,
+ * to make sure no race conditions
+ * when updating ReplicationSourceManager.totalBufferUser.
+ *
+ * 2) It does not attempt to terminate reader and shipper threads. Those must
+ * have been triggered interruption/termination prior to calling this method.
+ */
+ void clearWALEntryBatch() {
+   long timeout = System.currentTimeMillis() + this.shipEditsTimeout;
+   while(this.isAlive() || this.entryReader.isAlive()){
+     try {
+       if (System.currentTimeMillis() >= timeout) {
+         LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper "
+           + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}",
+           this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive());
+         return;
+       } else {
+         // Wait both shipper and reader threads to stop
+         Thread.sleep(this.sleepForRetries);
+       }
+     } catch (InterruptedException e) {
+       LOG.warn("{} Interrupted while waiting {} to stop on clearWALEntryBatch. "
+         + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e);
+       return;
+     }
+   }
+   LongAccumulator totalToDecrement = new LongAccumulator((a,b) -> a + b, 0);
+   entryReader.entryBatchQueue.forEach(w -> {
+     entryReader.entryBatchQueue.remove(w);
+     w.getWalEntries().forEach(e -> {
+       long entrySizeExcludeBulkLoad = ReplicationSourceWALReader.getEntrySizeExcludeBulkLoad(e);
+       totalToDecrement.accumulate(entrySizeExcludeBulkLoad);
+     });
+   });
+   if( LOG.isTraceEnabled()) {
+     LOG.trace("Decrementing totalBufferUsed by {}B while stopping Replication WAL Readers.",
+       totalToDecrement.longValue());
+   }
+   long newBufferUsed = source.getSourceManager().getTotalBufferUsed()
+     .addAndGet(-totalToDecrement.longValue());
+   source.getSourceManager().getGlobalMetrics().setWALReaderEditsBufferBytes(newBufferUsed);
+ }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
index c71db1bf785b..a6d87870b495 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
@@ -60,7 +60,8 @@ class ReplicationSourceWALReader extends Thread {
   private final WALEntryFilter filter;
   private final ReplicationSource source;

-  private final BlockingQueue<WALEntryBatch> entryBatchQueue;
+  @InterfaceAudience.Private
+  final BlockingQueue<WALEntryBatch> entryBatchQueue;
   // max (heap) size of each batch - multiply by number of batches in queue to get total
   private final long replicationBatchSizeCapacity;
   // max count of each batch - multiply by number of batches in queue to get total
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
index 796c0e3b18c7..50537b5e1be2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
@@ -22,7 +22,10 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; +import java.util.ArrayList; import java.util.OptionalLong; import java.util.UUID; import java.util.concurrent.ExecutorService; @@ -128,6 +131,8 @@ public void testDefaultSkipsMetaWAL() throws IOException { Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig); ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class); Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong()); + Mockito.when(manager.getGlobalMetrics()). + thenReturn(mock(MetricsReplicationGlobalSourceSource.class)); String queueId = "qid"; RegionServerServices rss = TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1")); @@ -269,6 +274,47 @@ public void testTerminateTimeout() throws Exception { } } + @Test + public void testTerminateClearsBuffer() throws Exception { + ReplicationSource source = new ReplicationSource(); + ReplicationSourceManager mockManager = mock(ReplicationSourceManager.class); + MetricsReplicationGlobalSourceSource mockMetrics = + mock(MetricsReplicationGlobalSourceSource.class); + AtomicLong buffer = new AtomicLong(); + Mockito.when(mockManager.getTotalBufferUsed()).thenReturn(buffer); + Mockito.when(mockManager.getGlobalMetrics()).thenReturn(mockMetrics); + ReplicationPeer mockPeer = mock(ReplicationPeer.class); + Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L); + Configuration testConf = HBaseConfiguration.create(); + source.init(testConf, null, mockManager, null, mockPeer, null, + "testPeer", null, p -> OptionalLong.empty(), mock(MetricsSource.class)); + ReplicationSourceWALReader reader = new ReplicationSourceWALReader(null, + conf, null, 0, null, source); + ReplicationSourceShipper shipper = + new ReplicationSourceShipper(conf, null, null, source); + shipper.entryReader = reader; + source.workerThreads.put("testPeer", shipper); + WALEntryBatch batch = new WALEntryBatch(10, logDir); + WAL.Entry mockEntry = mock(WAL.Entry.class); + WALEdit mockEdit = mock(WALEdit.class); + WALKeyImpl mockKey = mock(WALKeyImpl.class); + when(mockEntry.getEdit()).thenReturn(mockEdit); + when(mockEdit.isEmpty()).thenReturn(false); + when(mockEntry.getKey()).thenReturn(mockKey); + when(mockKey.estimatedSerializedSizeOf()).thenReturn(1000L); + when(mockEdit.heapSize()).thenReturn(10000L); + when(mockEdit.size()).thenReturn(0); + ArrayList cells = new ArrayList<>(); + KeyValue kv = new KeyValue(Bytes.toBytes("0001"), Bytes.toBytes("f"), + Bytes.toBytes("1"), Bytes.toBytes("v1")); + cells.add(kv); + when(mockEdit.getCells()).thenReturn(cells); + reader.addEntryToBatch(batch, mockEntry); + reader.entryBatchQueue.put(batch); + source.terminate("test"); + assertEquals(0, source.getSourceManager().getTotalBufferUsed().get()); + } + /** * Tests that recovered queues are preserved on a regionserver shutdown. 
* See HBASE-18192 @@ -438,12 +484,12 @@ public void testRecoveredReplicationSourceShipperGetPosition() throws Exception ServerName deadServer = ServerName.valueOf("www.deadServer.com", 12006, 1524679704419L); PriorityBlockingQueue queue = new PriorityBlockingQueue<>(); queue.put(new Path("/www/html/test")); - RecoveredReplicationSource source = Mockito.mock(RecoveredReplicationSource.class); - Server server = Mockito.mock(Server.class); + RecoveredReplicationSource source = mock(RecoveredReplicationSource.class); + Server server = mock(Server.class); Mockito.when(server.getServerName()).thenReturn(serverName); Mockito.when(source.getServer()).thenReturn(server); Mockito.when(source.getServerWALsBelongTo()).thenReturn(deadServer); - ReplicationQueueStorage storage = Mockito.mock(ReplicationQueueStorage.class); + ReplicationQueueStorage storage = mock(ReplicationQueueStorage.class); Mockito.when(storage.getWALPosition(Mockito.eq(serverName), Mockito.any(), Mockito.any())) .thenReturn(1001L); Mockito.when(storage.getWALPosition(Mockito.eq(deadServer), Mockito.any(), Mockito.any())) @@ -468,6 +514,8 @@ private RegionServerServices setupForAbortTests(ReplicationSource rs, Configurat Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig); ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class); Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong()); + Mockito.when(manager.getGlobalMetrics()). + thenReturn(mock(MetricsReplicationGlobalSourceSource.class)); String queueId = "qid"; RegionServerServices rss = TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1")); From 90ff55000049ef1bae34344387b376fd265deeb0 Mon Sep 17 00:00:00 2001 From: Gary Wang <34413055+whua3@users.noreply.github.com> Date: Tue, 5 Jan 2021 22:39:19 +0800 Subject: [PATCH 306/769] HBASE-25463 fix comment error of append.rb (#2845) Signed-off-by: Viraj Jasani --- hbase-shell/src/main/ruby/shell/commands/append.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-shell/src/main/ruby/shell/commands/append.rb b/hbase-shell/src/main/ruby/shell/commands/append.rb index a7273ca4edfe..b469f0bda638 100644 --- a/hbase-shell/src/main/ruby/shell/commands/append.rb +++ b/hbase-shell/src/main/ruby/shell/commands/append.rb @@ -50,5 +50,5 @@ def append(table, row, column, value, args = {}) end end -# add incr comamnd to Table +# add append command to Table ::Hbase::Table.add_shell_command('append') From a5eb8f1f701103d66147adc8e4f4cca41edb1604 Mon Sep 17 00:00:00 2001 From: stack Date: Tue, 5 Jan 2021 19:39:39 -0800 Subject: [PATCH 307/769] =?UTF-8?q?HBASE-25438=20Update=20create-release?= =?UTF-8?q?=20mvn=20in=20Dockerfile;=20its=203.6.0;=20make=20=E2=80=A6=20(?= =?UTF-8?q?#2807)=20Addendum.=20Missing=20-L=20on=20added=20curl.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dev-support/create-release/hbase-rm/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile index ac443b64228d..c43976f61dd1 100644 --- a/dev-support/create-release/hbase-rm/Dockerfile +++ b/dev-support/create-release/hbase-rm/Dockerfile @@ -59,7 +59,7 @@ RUN mkdir -p /opt/maven \ # Install Apache Yetus ENV YETUS_VERSION 0.12.0 -RUN curl "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ +RUN curl -L 
"https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ tar xvz -C /opt ENV YETUS_HOME /opt/apache-yetus-${YETUS_VERSION} From bedb45d4acbe2551c13a4fe5a524d2cb2d82a95d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 6 Jan 2021 15:13:10 +0800 Subject: [PATCH 308/769] HBASE-25458 HRegion methods cleanup (#2838) Signed-off-by: meiyi --- .../hadoop/hbase/regionserver/HRegion.java | 225 ++++++++---------- .../hbase/regionserver/RSRpcServices.java | 3 +- .../regionserver/TestCompactingMemStore.java | 6 +- .../TestCompactingToCellFlatMapMemStore.java | 3 +- .../regionserver/TestDefaultMemStore.java | 23 +- .../hbase/regionserver/TestHRegion.java | 45 ++-- 6 files changed, 126 insertions(+), 179 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 493b74b6b9ac..4ec61ac5c051 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -913,17 +913,19 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co } } - void setHTableSpecificConf() { - if (this.htableDescriptor == null) return; + private void setHTableSpecificConf() { + if (this.htableDescriptor == null) { + return; + } long flushSize = this.htableDescriptor.getMemStoreFlushSize(); if (flushSize <= 0) { flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, - TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); + TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); } this.memstoreFlushSize = flushSize; long mult = conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, - HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); + HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); this.blockingMemStoreSize = this.memstoreFlushSize * mult; } @@ -1336,7 +1338,7 @@ public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration * Increase the size of mem store in this region and the size of global mem * store */ - void incMemStoreSize(MemStoreSize mss) { + private void incMemStoreSize(MemStoreSize mss) { incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), mss.getCellsCount()); } @@ -1356,7 +1358,7 @@ void decrMemStoreSize(MemStoreSize mss) { mss.getCellsCount()); } - void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, + private void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, int cellsCountDelta) { if (this.rsAccounting != null) { rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta); @@ -1987,7 +1989,7 @@ public boolean waitForFlushes(long timeout) { } } - protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool( + private ThreadPoolExecutor getStoreOpenAndCloseThreadPool( final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount()); int maxThreads = Math.min(numStores, @@ -1996,7 +1998,7 @@ protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool( return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix); } - protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( + ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount()); int maxThreads = Math.max(1, @@ -2006,7 
+2008,7 @@ protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix); } - static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads, + private static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads, final String threadNamePrefix) { return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactory() { @@ -2475,11 +2477,11 @@ enum Result { boolean isCompactionNeeded(); } - public FlushResultImpl flushcache(boolean flushAllStores, boolean writeFlushRequestWalMarker, + FlushResultImpl flushcache(boolean flushAllStores, boolean writeFlushRequestWalMarker, FlushLifeCycleTracker tracker) throws IOException { - List families = null; + List families = null; if (flushAllStores) { - families = new ArrayList(); + families = new ArrayList<>(); families.addAll(this.getTableDescriptor().getColumnFamilyNames()); } return this.flushcache(families, writeFlushRequestWalMarker, tracker); @@ -2960,7 +2962,7 @@ private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarke @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", justification="Intentional; notify is about completed flush") - protected FlushResultImpl internalFlushCacheAndCommit(WAL wal, MonitoredTask status, + FlushResultImpl internalFlushCacheAndCommit(WAL wal, MonitoredTask status, PrepareFlushResult prepareResult, Collection storesToFlush) throws IOException { // prepare flush context is carried via PrepareFlushResult TreeMap storeFlushCtxs = prepareResult.storeFlushCtxs; @@ -3157,12 +3159,6 @@ private RegionScannerImpl getScanner(Scan scan, List additional } } - protected RegionScanner instantiateRegionScanner(Scan scan, - List additionalScanners) throws IOException { - return instantiateRegionScanner(scan, additionalScanners, HConstants.NO_NONCE, - HConstants.NO_NONCE); - } - protected RegionScannerImpl instantiateRegionScanner(Scan scan, List additionalScanners, long nonceGroup, long nonce) throws IOException { if (scan.isReversed()) { @@ -3177,9 +3173,8 @@ protected RegionScannerImpl instantiateRegionScanner(Scan scan, /** * Prepare a delete for a row mutation processor * @param delete The passed delete is modified by this method. WARNING! - * @throws IOException */ - public void prepareDelete(Delete delete) throws IOException { + private void prepareDelete(Delete delete) throws IOException { // Check to see if this is a deleteRow insert if(delete.getFamilyCellMap().isEmpty()){ for(byte [] family : this.htableDescriptor.getColumnFamilyNames()){ @@ -3203,38 +3198,18 @@ public void delete(Delete delete) throws IOException { startRegionOperation(Operation.DELETE); try { // All edits for the given row (across all column families) must happen atomically. - doBatchMutate(delete); + mutate(delete); } finally { closeRegionOperation(Operation.DELETE); } } - /** - * Row needed by below method. - */ - private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly"); - - /** - * This is used only by unit tests. Not required to be a public API. - * @param familyMap map of family to edits for the given family. - * @throws IOException - */ - void delete(NavigableMap> familyMap, - Durability durability) throws IOException { - Delete delete = new Delete(FOR_UNIT_TESTS_ONLY, HConstants.LATEST_TIMESTAMP, familyMap); - delete.setDurability(durability); - doBatchMutate(delete); - } - /** * Set up correct timestamps in the KVs in Delete object. - *
<p>
    Caller should have the row and region locks. - * @param mutation - * @param familyMap - * @param byteNow - * @throws IOException + *
<p>
    + * Caller should have the row and region locks. */ - public void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, + private void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, byte[] byteNow) throws IOException { for (Map.Entry> e : familyMap.entrySet()) { @@ -3278,7 +3253,7 @@ public void prepareDeleteTimestamps(Mutation mutation, Map> f } } - void updateDeleteLatestVersionTimestamp(Cell cell, Get get, int count, byte[] byteNow) + private void updateDeleteLatestVersionTimestamp(Cell cell, Get get, int count, byte[] byteNow) throws IOException { List result = get(get, false); @@ -3306,7 +3281,7 @@ public void put(Put put) throws IOException { startRegionOperation(Operation.PUT); try { // All edits for the given row (across all column families) must happen atomically. - doBatchMutate(put); + mutate(put); } finally { closeRegionOperation(Operation.PUT); } @@ -3353,7 +3328,7 @@ public BatchOperation(final HRegion region, T[] operations) { * Visitor interface for batch operations */ @FunctionalInterface - public interface Visitor { + interface Visitor { /** * @param index operation index * @return If true continue visiting remaining entries, break otherwise @@ -3759,14 +3734,17 @@ protected void applyFamilyMapToMemStore(Map> familyMap, /** - * Batch of mutation operations. Base class is shared with {@link ReplayBatchOperation} as most - * of the logic is same. + * Batch of mutation operations. Base class is shared with {@link ReplayBatchOperation} as most of + * the logic is same. */ - static class MutationBatchOperation extends BatchOperation { + private static class MutationBatchOperation extends BatchOperation { + private long nonceGroup; + private long nonce; + public MutationBatchOperation(final HRegion region, Mutation[] operations, boolean atomic, - long nonceGroup, long nonce) { + long nonceGroup, long nonce) { super(region, operations); this.atomic = atomic; this.nonceGroup = nonceGroup; @@ -4401,10 +4379,12 @@ private void mergeFamilyMaps(Map> familyMap, * Batch of mutations for replay. Base class is shared with {@link MutationBatchOperation} as most * of the logic is same. */ - static class ReplayBatchOperation extends BatchOperation { + private static final class ReplayBatchOperation extends BatchOperation { + private long origLogSeqNum = 0; + public ReplayBatchOperation(final HRegion region, MutationReplay[] operations, - long origLogSeqNum) { + long origLogSeqNum) { super(region, operations); this.origLogSeqNum = origLogSeqNum; } @@ -4512,12 +4492,12 @@ public void completeMiniBatchOperations( } } - public OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long nonceGroup, - long nonce) throws IOException { + private OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long nonceGroup, + long nonce) throws IOException { // As it stands, this is used for 3 things - // * batchMutate with single mutation - put/delete/increment/append, separate or from - // checkAndMutate. - // * coprocessor calls (see ex. BulkDeleteEndpoint). + // * batchMutate with single mutation - put/delete/increment/append, separate or from + // checkAndMutate. + // * coprocessor calls (see ex. BulkDeleteEndpoint). // So nonces are not really ever used by HBase. They could be by coprocs, and checkAnd... 
return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce)); } @@ -4525,8 +4505,12 @@ public OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long @Override public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException { // If the mutations has any Increment/Append operations, we need to do batchMutate atomically - boolean atomic = Arrays.stream(mutations) - .anyMatch(m -> m instanceof Increment || m instanceof Append); + boolean atomic = + Arrays.stream(mutations).anyMatch(m -> m instanceof Increment || m instanceof Append); + return batchMutate(mutations, atomic); + } + + OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic) throws IOException { return batchMutate(mutations, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); } @@ -4556,24 +4540,23 @@ public OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqI /** * Perform a batch of mutations. - * + *
* <p>
    * Operations in a batch are stored with highest durability specified of for all operations in a * batch, except for {@link Durability#SKIP_WAL}. - * - *
<p>
    This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with + *
<p>
    + * This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with * {@link ReplayBatchOperation} instance and {@link #batchMutate(Mutation[])} with - * {@link MutationBatchOperation} instance as an argument. As the processing of replay batch - * and mutation batch is very similar, lot of code is shared by providing generic methods in - * base class {@link BatchOperation}. The logic for this method and - * {@link #doMiniBatchMutate(BatchOperation)} is implemented using methods in base class which - * are overridden by derived classes to implement special behavior. - * + * {@link MutationBatchOperation} instance as an argument. As the processing of replay batch and + * mutation batch is very similar, lot of code is shared by providing generic methods in base + * class {@link BatchOperation}. The logic for this method and + * {@link #doMiniBatchMutate(BatchOperation)} is implemented using methods in base class which are + * overridden by derived classes to implement special behavior. * @param batchOp contains the list of mutations - * @return an array of OperationStatus which internally contains the - * OperationStatusCode and the exceptionMessage if any. + * @return an array of OperationStatus which internally contains the OperationStatusCode and the + * exceptionMessage if any. * @throws IOException if an IO problem is encountered */ - OperationStatus[] batchMutate(BatchOperation batchOp) throws IOException { + private OperationStatus[] batchMutate(BatchOperation batchOp) throws IOException { boolean initialized = false; batchOp.startRegionOperation(); try { @@ -4727,7 +4710,7 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { * Returns effective durability from the passed durability and * the table descriptor. */ - protected Durability getEffectiveDurability(Durability d) { + private Durability getEffectiveDurability(Durability d) { return d == Durability.USE_DEFAULT ? this.regionDurability : d; } @@ -4916,7 +4899,7 @@ public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws // All edits for the given row (across all column families) must happen atomically. 
Result r; if (mutation != null) { - r = doBatchMutate(mutation, true).getResult(); + r = mutate(mutation, true).getResult(); } else { r = mutateRow(rowMutations); } @@ -4976,27 +4959,26 @@ private boolean matches(final CompareOperator op, final int compareResult) { return matches; } - private OperationStatus doBatchMutate(Mutation mutation) throws IOException { - return doBatchMutate(mutation, false); + private OperationStatus mutate(Mutation mutation) throws IOException { + return mutate(mutation, false); } - private OperationStatus doBatchMutate(Mutation mutation, boolean atomic) throws IOException { - return doBatchMutate(mutation, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); + private OperationStatus mutate(Mutation mutation, boolean atomic) throws IOException { + return mutate(mutation, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); } - private OperationStatus doBatchMutate(Mutation mutation, boolean atomic, long nonceGroup, - long nonce) throws IOException { - OperationStatus[] batchMutate = this.batchMutate(new Mutation[]{mutation}, atomic, - nonceGroup, nonce); - if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { - throw new FailedSanityCheckException(batchMutate[0].getExceptionMsg()); - } else if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { - throw new NoSuchColumnFamilyException(batchMutate[0].getExceptionMsg()); - } else if (batchMutate[0].getOperationStatusCode().equals( - OperationStatusCode.STORE_TOO_BUSY)) { - throw new RegionTooBusyException(batchMutate[0].getExceptionMsg()); + private OperationStatus mutate(Mutation mutation, boolean atomic, long nonceGroup, long nonce) + throws IOException { + OperationStatus[] status = + this.batchMutate(new Mutation[] { mutation }, atomic, nonceGroup, nonce); + if (status[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { + throw new FailedSanityCheckException(status[0].getExceptionMsg()); + } else if (status[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { + throw new NoSuchColumnFamilyException(status[0].getExceptionMsg()); + } else if (status[0].getOperationStatusCode().equals(OperationStatusCode.STORE_TOO_BUSY)) { + throw new RegionTooBusyException(status[0].getExceptionMsg()); } - return batchMutate[0]; + return status[0]; } /** @@ -5055,7 +5037,7 @@ private static void updateCellTimestamps(final Iterable> cellItr, fin /** * Possibly rewrite incoming cell tags. */ - void rewriteCellTags(Map> familyMap, final Mutation m) { + private void rewriteCellTags(Map> familyMap, final Mutation m) { // Check if we have any work to do and early out otherwise // Update these checks as more logic is added here if (m.getTTL() == Long.MAX_VALUE) { @@ -5077,15 +5059,17 @@ void rewriteCellTags(Map> familyMap, final Mutation m) { } } - /* + /** * Check if resources to support an update. - * - * We throw RegionTooBusyException if above memstore limit - * and expect client to retry using some kind of backoff - */ - void checkResources() throws RegionTooBusyException { + *
<p>
    + * We throw RegionTooBusyException if above memstore limit and expect client to retry using some + * kind of backoff + */ + private void checkResources() throws RegionTooBusyException { // If catalog region, do not impose resource constraints or block updates. - if (this.getRegionInfo().isMetaRegion()) return; + if (this.getRegionInfo().isMetaRegion()) { + return; + } MemStoreSize mss = this.memStoreSizing.getMemStoreSize(); if (mss.getHeapSize() + mss.getOffHeapSize() > this.blockingMemStoreSize) { @@ -5110,13 +5094,13 @@ void checkResources() throws RegionTooBusyException { /** * @throws IOException Throws exception if region is in read-only mode. */ - protected void checkReadOnly() throws IOException { + private void checkReadOnly() throws IOException { if (isReadOnly()) { throw new DoNotRetryIOException("region is read only"); } } - protected void checkReadsEnabled() throws IOException { + private void checkReadsEnabled() throws IOException { if (!this.writestate.readsEnabled) { throw new IOException(getRegionInfo().getEncodedName() + ": The region's reads are disabled. Cannot serve the request"); @@ -5130,21 +5114,6 @@ public void setReadsEnabled(boolean readsEnabled) { this.writestate.setReadsEnabled(readsEnabled); } - /** - * Add updates first to the wal and then add values to memstore. - *
<p>
    - * Warning: Assumption is caller has lock on passed in row. - * @param edits Cell updates by column - */ - void put(final byte[] row, byte[] family, List edits) throws IOException { - NavigableMap> familyMap; - familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - - familyMap.put(family, edits); - Put p = new Put(row, HConstants.LATEST_TIMESTAMP, familyMap); - doBatchMutate(p); - } - /** * @param delta If we are doing delta changes -- e.g. increment/append -- then this flag will be * set; when set we will run operations that make sense in the increment/append scenario @@ -5194,7 +5163,7 @@ private void checkFamily(final byte[] family, Durability durability) } } - void checkFamily(final byte[] family) throws NoSuchColumnFamilyException { + private void checkFamily(final byte[] family) throws NoSuchColumnFamilyException { if (!this.htableDescriptor.hasColumnFamily(family)) { throw new NoSuchColumnFamilyException( "Column family " + Bytes.toString(family) + " does not exist in region " + this @@ -6055,7 +6024,7 @@ private long loadRecoveredHFilesIfAny(Collection stores) throws IOExcept * Currently, this method is used to drop memstore to prevent memory leak * when replaying recovered.edits while opening region. */ - public MemStoreSize dropMemStoreContents() throws IOException { + private MemStoreSize dropMemStoreContents() throws IOException { MemStoreSizing totalFreedSize = new NonThreadSafeMemStoreSizing(); this.updatesLock.writeLock().lock(); try { @@ -8106,11 +8075,11 @@ public static Region openHRegion(final Region other, final CancelableProgressabl /** * Open HRegion. + *
<p>
    * Calls initialize and sets sequenceId. * @return Returns this */ - protected HRegion openHRegion(final CancelableProgressable reporter) - throws IOException { + private HRegion openHRegion(final CancelableProgressable reporter) throws IOException { try { // Refuse to open the region if we are missing local compression support TableDescriptorChecker.checkCompression(htableDescriptor); @@ -8255,7 +8224,7 @@ public List get(Get get, boolean withCoprocessor) throws IOException { return get(get, withCoprocessor, HConstants.NO_NONCE, HConstants.NO_NONCE); } - public List get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) + private List get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) throws IOException { List results = new ArrayList<>(); long before = EnvironmentEdgeManager.currentTime(); @@ -8619,7 +8588,7 @@ public Result append(Append append, long nonceGroup, long nonce) throws IOExcept startRegionOperation(Operation.APPEND); try { // All edits for the given row (across all column families) must happen atomically. - return doBatchMutate(append, true, nonceGroup, nonce).getResult(); + return mutate(append, true, nonceGroup, nonce).getResult(); } finally { closeRegionOperation(Operation.APPEND); } @@ -8636,7 +8605,7 @@ public Result increment(Increment increment, long nonceGroup, long nonce) throws startRegionOperation(Operation.INCREMENT); try { // All edits for the given row (across all column families) must happen atomically. - return doBatchMutate(increment, true, nonceGroup, nonce).getResult(); + return mutate(increment, true, nonceGroup, nonce).getResult(); } finally { closeRegionOperation(Operation.INCREMENT); } @@ -9176,15 +9145,11 @@ public void incrementFlushesQueuedCount() { flushesQueued.increment(); } - public long getReadPoint() { - return getReadPoint(IsolationLevel.READ_COMMITTED); - } - /** * If a handler thread is eligible for interrupt, make it ineligible. Should be paired * with {{@link #enableInterrupts()}. */ - protected void disableInterrupts() { + void disableInterrupts() { regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> false); } @@ -9192,7 +9157,7 @@ protected void disableInterrupts() { * If a handler thread was made ineligible for interrupt via {{@link #disableInterrupts()}, * make it eligible again. No-op if interrupts are already enabled. 
*/ - protected void enableInterrupts() { + void enableInterrupts() { regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> true); } @@ -9364,7 +9329,7 @@ public void requestFlush(FlushLifeCycleTracker tracker) throws IOException { * features * @param conf region configurations */ - static void decorateRegionConfiguration(Configuration conf) { + private static void decorateRegionConfiguration(Configuration conf) { if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) { String plugins = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,""); String replicationCoprocessorClass = ReplicationObserver.class.getCanonicalName(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 78926d6c39d5..f8323c6a1164 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1003,8 +1003,7 @@ private void doBatchOp(final RegionActionResult.Builder builder, final HRegion r Arrays.sort(mArray, (v1, v2) -> Row.COMPARATOR.compare(v1, v2)); } - OperationStatus[] codes = region.batchMutate(mArray, atomic, HConstants.NO_NONCE, - HConstants.NO_NONCE); + OperationStatus[] codes = region.batchMutate(mArray, atomic); // When atomic is true, it indicates that the mutateRow API or the batch API with // RowMutations is called. In this case, we need to merge the results of the diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 9b336c21fc67..673369091d22 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -710,8 +710,7 @@ public void testCompaction2Buckets() throws IOException { mss = memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot // simulate flusher - region.decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), - mss.getCellsCount()); + region.decrMemStoreSize(mss); ImmutableSegment s = memstore.getSnapshot(); assertEquals(7, s.getCellsCount()); assertEquals(0, regionServicesForStores.getMemStoreSize()); @@ -788,8 +787,7 @@ public void testCompaction3Buckets() throws IOException { mss = memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot // simulate flusher - region.decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), - mss.getCellsCount()); + region.decrMemStoreSize(mss); ImmutableSegment s = memstore.getSnapshot(); assertEquals(4, s.getCellsCount()); assertEquals(0, regionServicesForStores.getMemStoreSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java index 617caeccd81e..072daa80210a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java @@ -282,8 +282,7 @@ public void testCompaction3Buckets() throws IOException { mss = 
memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot // simulate flusher - region.decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), - mss.getCellsCount()); + region.decrMemStoreSize(mss); ImmutableSegment s = memstore.getSnapshot(); assertEquals(4, s.getCellsCount()); assertEquals(0, regionServicesForStores.getMemStoreSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 12bfc667c2d7..986ffd0b4c54 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -26,7 +26,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.NavigableMap; import java.util.Objects; +import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; @@ -971,28 +973,23 @@ public void testShouldFlushMeta() throws Exception { } /** - * Inserts a new region's meta information into the passed - * meta region. Used by the HMaster bootstrap code adding - * new table to hbase:meta table. - * + * Inserts a new region's meta information into the passed meta region. * @param meta hbase:meta HRegion to be updated * @param r HRegion to add to meta - * - * @throws IOException */ - public static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException { - meta.checkResources(); + private static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException { // The row key is the region name byte[] row = r.getRegionInfo().getRegionName(); final long now = EnvironmentEdgeManager.currentTime(); final List cells = new ArrayList<>(2); - cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER, now, RegionInfo.toByteArray(r.getRegionInfo()))); + cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, now, + RegionInfo.toByteArray(r.getRegionInfo()))); // Set into the root table the version of the meta table. 
- cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, - HConstants.META_VERSION_QUALIFIER, now, + cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER, now, Bytes.toBytes(HConstants.META_VERSION))); - meta.put(row, HConstants.CATALOG_FAMILY, cells); + NavigableMap> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + familyMap.put(HConstants.CATALOG_FAMILY, cells); + meta.put(new Put(row, HConstants.LATEST_TIMESTAMP, familyMap)); } private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index fcbc718296ae..58668933c61f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -26,6 +26,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; @@ -43,7 +44,6 @@ import java.io.IOException; import java.io.InterruptedIOException; import java.math.BigDecimal; -import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; @@ -137,7 +137,6 @@ import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; -import org.apache.hadoop.hbase.regionserver.HRegion.MutationBatchOperation; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.Region.RowLock; @@ -1679,9 +1678,7 @@ public void testAtomicBatchPut() throws IOException { long syncs = prepareRegionForBachPut(puts, source, false); // 1. Straight forward case, should succeed - MutationBatchOperation batchOp = new MutationBatchOperation(region, puts, true, - HConstants.NO_NONCE, HConstants.NO_NONCE); - OperationStatus[] codes = this.region.batchMutate(batchOp); + OperationStatus[] codes = this.region.batchMutate(puts, true); assertEquals(10, codes.length); for (int i = 0; i < 10; i++) { assertEquals(OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode()); @@ -1695,15 +1692,11 @@ public void testAtomicBatchPut() throws IOException { MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(CONF); final AtomicReference retFromThread = new AtomicReference<>(); final CountDownLatch finishedPuts = new CountDownLatch(1); - final MutationBatchOperation finalBatchOp = new MutationBatchOperation(region, puts, true, - HConstants - .NO_NONCE, - HConstants.NO_NONCE); TestThread putter = new TestThread(ctx) { @Override public void doWork() throws IOException { try { - region.batchMutate(finalBatchOp); + region.batchMutate(puts, true); } catch (IOException ioe) { LOG.error("test failed!", ioe); retFromThread.set(ioe); @@ -1730,10 +1723,8 @@ public void doWork() throws IOException { // 3. 
Exception thrown in validation LOG.info("Next a batch put with one invalid family"); puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, value); - batchOp = new MutationBatchOperation(region, puts, true, HConstants.NO_NONCE, - HConstants.NO_NONCE); thrown.expect(NoSuchColumnFamilyException.class); - this.region.batchMutate(batchOp); + this.region.batchMutate(puts, true); } @Test @@ -3172,23 +3163,19 @@ public void testDelete_CheckFamily() throws IOException { List kvs = new ArrayList<>(); kvs.add(new KeyValue(row1, fam4, null, null)); + byte[] forUnitTestsOnly = Bytes.toBytes("ForUnitTestsOnly"); + // testing existing family - byte[] family = fam2; NavigableMap> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - deleteMap.put(family, kvs); - region.delete(deleteMap, Durability.SYNC_WAL); + deleteMap.put(fam2, kvs); + region.delete(new Delete(forUnitTestsOnly, HConstants.LATEST_TIMESTAMP, deleteMap)); // testing non existing family - boolean ok = false; - family = fam4; - try { - deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - deleteMap.put(family, kvs); - region.delete(deleteMap, Durability.SYNC_WAL); - } catch (Exception e) { - ok = true; - } - assertTrue("Family " + new String(family, StandardCharsets.UTF_8) + " does exist", ok); + NavigableMap> deleteMap2 = new TreeMap<>(Bytes.BYTES_COMPARATOR); + deleteMap2.put(fam4, kvs); + assertThrows("Family " + Bytes.toString(fam4) + " does exist", + NoSuchColumnFamilyException.class, + () -> region.delete(new Delete(forUnitTestsOnly, HConstants.LATEST_TIMESTAMP, deleteMap2))); } @Test @@ -3549,6 +3536,8 @@ public void testDelete_CheckTimestampUpdated() throws IOException { byte[] col2 = Bytes.toBytes("col2"); byte[] col3 = Bytes.toBytes("col3"); + byte[] forUnitTestsOnly = Bytes.toBytes("ForUnitTestsOnly"); + // Setting up region this.region = initHRegion(tableName, method, CONF, fam1); // Building checkerList @@ -3559,12 +3548,12 @@ public void testDelete_CheckTimestampUpdated() throws IOException { NavigableMap> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); deleteMap.put(fam1, kvs); - region.delete(deleteMap, Durability.SYNC_WAL); + region.delete(new Delete(forUnitTestsOnly, HConstants.LATEST_TIMESTAMP, deleteMap)); // extract the key values out the memstore: // This is kinda hacky, but better than nothing... 
long now = System.currentTimeMillis(); - AbstractMemStore memstore = (AbstractMemStore)region.getStore(fam1).memstore; + AbstractMemStore memstore = (AbstractMemStore) region.getStore(fam1).memstore; Cell firstCell = memstore.getActive().first(); assertTrue(firstCell.getTimestamp() <= now); now = firstCell.getTimestamp(); From a414361ed969dabece4aa0367362500e5e46352b Mon Sep 17 00:00:00 2001 From: mokai Date: Wed, 6 Jan 2021 22:27:58 +0800 Subject: [PATCH 309/769] HBASE-24755 [LOG][RSGroup]Error message is confusing while adding an offline RS to rsgroup (#2846) Signed-off-by: Viraj Jasani Signed-off-by: Wellington Chevreuil --- .../apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java | 6 +++--- .../org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 9850917e795d..3ef9365456fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -1233,9 +1233,9 @@ public void moveServers(Set
<Address>
    servers, String targetGroupName) throws IOE Address firstServer = servers.iterator().next(); RSGroupInfo srcGrp = getRSGroupOfServer(firstServer); if (srcGrp == null) { - // Be careful. This exception message is tested for in TestRSGroupsBase... - throw new ConstraintException("Source RSGroup for server " + firstServer - + " does not exist."); + // Be careful. This exception message is tested for in TestRSGroupAdmin2... + throw new ConstraintException("Server " + firstServer + + " is either offline or it does not exist."); } // Only move online servers (when moving from 'default') or servers from other diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java index 983414236c3b..f31e80fa7ee6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java @@ -189,7 +189,7 @@ public void testMoveServers() throws Exception { ADMIN.moveServersToRSGroup(Sets.newHashSet(Address.fromString("foo:9999")), "foo"); fail("Bogus servers shouldn't have been successfully moved."); } catch (IOException ex) { - String exp = "Source RSGroup for server foo:9999 does not exist."; + String exp = "Server foo:9999 is either offline or it does not exist."; String msg = "Expected '" + exp + "' in exception message: "; assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp)); } @@ -337,7 +337,7 @@ public boolean evaluate() throws Exception { ADMIN.setRSGroup(Sets.newHashSet(tableName), newGroup.getName()); fail("Bogus servers shouldn't have been successfully moved."); } catch (IOException ex) { - String exp = "Source RSGroup for server foo:9999 does not exist."; + String exp = "Server foo:9999 is either offline or it does not exist."; String msg = "Expected '" + exp + "' in exception message: "; assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp)); } From 4b6215297d585af425c823e1fe78457f0e038b68 Mon Sep 17 00:00:00 2001 From: Anjan Das Date: Thu, 7 Jan 2021 15:31:50 +0530 Subject: [PATCH 310/769] HBASE-25445: Use WAL FS instead of master FS in SplitWALManager (#2844) Signed-off-by: Pankaj Signed-off-by: ramkrish86 Signed-off-by: Viraj Jasani --- .../hadoop/hbase/master/SplitWALManager.java | 3 +- .../hbase/master/TestSplitWALManager.java | 64 +++++++++++++++++++ 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index aa91c84cb672..6db094c4e6df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -85,8 +85,7 @@ public SplitWALManager(MasterServices master) throws IOException { this.splitWorkerAssigner = new SplitWorkerAssigner(this.master, conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER)); this.rootDir = master.getMasterFileSystem().getWALRootDir(); - // TODO: This should be the WAL FS, not the Master FS? 
- this.fs = master.getMasterFileSystem().getFileSystem(); + this.fs = master.getMasterFileSystem().getWALFileSystem(); this.walArchiveDir = new Path(this.rootDir, HConstants.HREGION_OLDLOGDIR_NAME); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index 7edb011f97f4..10eda749891d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -31,6 +31,14 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -43,6 +51,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.junit.After; @@ -54,6 +63,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Category({ MasterTests.class, LargeTests.class }) @@ -63,6 +74,7 @@ public class TestSplitWALManager { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSplitWALManager.class); + private static final Logger LOG = LoggerFactory.getLogger(TestSplitWALManager.class); private static HBaseTestingUtility TEST_UTIL; private HMaster master; private SplitWALManager splitWALManager; @@ -86,6 +98,58 @@ public void teardown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + @Test + public void testWALArchiveWithDifferentWalAndRootFS() throws Exception{ + HBaseTestingUtility test_util_2 = new HBaseTestingUtility(); + Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); + test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); + CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir); + test_util_2.startMiniCluster(3); + HMaster master2 = test_util_2.getHBaseCluster().getMaster(); + LOG.info("The Master FS is pointing to: " + master2.getMasterFileSystem() + .getFileSystem().getUri()); + LOG.info("The WAL FS is pointing to: " + master2.getMasterFileSystem() + .getWALFileSystem().getUri()); + Table table = test_util_2.createTable(TABLE_NAME, FAMILY); + test_util_2.waitTableAvailable(TABLE_NAME); + Admin admin = test_util_2.getAdmin(); + MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster() + .getMasterProcedureExecutor().getEnvironment(); + final ProcedureExecutor executor = 
test_util_2.getMiniHBaseCluster() + .getMaster().getMasterProcedureExecutor(); + List regionInfos = admin.getRegions(TABLE_NAME); + SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure( + env, regionInfos.get(0), Bytes.toBytes("row5")); + // Populate some rows in the table + LOG.info("Beginning put data to the table: " + TABLE_NAME.toString()); + int rowCount = 5; + for (int i = 0; i < rowCount; i++) { + byte[] row = Bytes.toBytes("row" + i); + Put put = new Put(row); + put.addColumn(FAMILY, FAMILY, FAMILY); + table.put(put); + } + executor.submitProcedure(splitProcedure); + LOG.info("Submitted SplitProcedure."); + test_util_2.waitFor(30000, () -> executor.getProcedures().stream() + .filter(p -> p instanceof TransitRegionStateProcedure) + .map(p -> (TransitRegionStateProcedure) p) + .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); + test_util_2.getMiniHBaseCluster().killRegionServer( + test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName()); + test_util_2.getMiniHBaseCluster().startRegionServer(); + test_util_2.waitUntilNoRegionsInTransition(); + Scan scan = new Scan(); + ResultScanner results = table.getScanner(scan); + int scanRowCount = 0; + while (results.next() != null) { + scanRowCount++; + } + Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount + + " were expected.", rowCount, scanRowCount); + test_util_2.shutdownMiniCluster(); + } + @Test public void testAcquireAndRelease() throws Exception { List testProcedures = new ArrayList<>(); From 2444d268901644d90def3fca39505627ff956b40 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 7 Jan 2021 09:44:33 -0800 Subject: [PATCH 311/769] HBASE-25468 Update git-jira-audit fallback_actions file with recent exceptions (#2852) Signed-off-by: huaxiangsun --- .../fallback_actions.csv | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/dev-support/git-jira-release-audit/fallback_actions.csv b/dev-support/git-jira-release-audit/fallback_actions.csv index eb6c97c1c567..72bdf8f419b2 100644 --- a/dev-support/git-jira-release-audit/fallback_actions.csv +++ b/dev-support/git-jira-release-audit/fallback_actions.csv @@ -22,12 +22,16 @@ hexsha,action,jira_id 0057cd8ca7ff09ed6b794af71df301c5c47487f4,SKIP, 022f30ce0dd3dd931f6045c6778e194ef5c41f7a,SKIP, +048cee6e47022194a1c2bf84cdb9e2873c7f74dd,SKIP, 0505072c5182841ad1a28d798527c69bcc3348f0,SKIP, +057d83cfafd8d659576869f1e71e3e75029fbad3,SKIP, 05cb051423953b913156e4950b67f3d9b28ada5f,REVERT,HBASE-14391 05f8e94191ef6a63baadf56d6114d7d0317796f2,SKIP, 0791b878422eadf00b55076338f09bf059f39f0c,SKIP, 07f9f3d38cf4d0d01044ab28d90a50a1a009f6b8,SKIP, +081d65de93587f77c22675497c5f3314bf21ded3,SKIP, 0bff1305134b9c3a0bcad21900f5af68a8aedb4a,SKIP, +0ebc96e0491dde1aed25f7a5f7ba1df5ed2042c5,SKIP, 10f00547627076d79d77cf58dd2deaece2287084,ADD,HBASE-22330 10f3b77748a02a2c11635c33964929c0474e890d,SKIP, 1196e42362312080d3c523c107b5e8fefef9e57e,SKIP, @@ -63,19 +67,23 @@ hexsha,action,jira_id 2e4544a8b00766248c998850f8907511b8bae240,SKIP, 2e63f882c85fb5804aafff5d92503eca60c0820d,SKIP, 2ebd80499473bbac3eac083806211ec03e084db7,SKIP, +30ab9665068ba85ddfabf0d4e21f4da28d24404e,SKIP, 31b9096034e19171989fd5b76313e7e0f1a9a12a,SKIP, 31d37fb904c4fcd77e79f9df8db155c5a3d1d8ed,SKIP, 31fe5dbf6b3a261f2c902d0fd6b82bf6c7ecf954,REVERT,HBASE-19685 31fe5dbf6b3a261f2c902d0fd6b82bf6c7ecf954,SKIP, +34b2b48a6f6c5546f98a6716cfc6f5f001ed2f1d,SKIP, 34e97f9c08d97b38be9a8f7dda6214d7ae9c6ea8,SKIP, 34ecc75d1669e330c78c3e9b832eca0abf57902d,SKIP, 
34fe1f5fd762e4ead3b0e2e820c360796939b315,SKIP, +361e81e1f893ae1bc923ef49d38b1832dbc6a253,SKIP, 37d46fcf85da772a06da29d9add8a0652330f6c5,SKIP, 38e2dbc503a7f9ef929ff11b615157f0ee79916c,SKIP, 3966d0fee6c9803cf567ef76d91855a1eaad621d,SKIP, 399b3e9d1bc68c2709565f0a1a719a9a66999564,SKIP, 39a4c56690eeeb2bb5ffaa0f3c8f6759b4fb3fb2,SKIP, 3a11028cdfc6e44576069bed452a0ed10c860db1,SKIP, +3a8b4d67386967b50a42941814801a2874d994eb,SKIP, 3b73ebb7b8975e18c67c24c258fbc061614bb7f2,SKIP, 3c7a349c2eab74a76c06b66df2e2d14ea7681f95,SKIP, 3dcb03947ce9cb1825167784992e689a23847351,ADD,HBASE-18290 @@ -118,6 +126,7 @@ hexsha,action,jira_id 6b54917d520d32d00f5b4e9420e0d4894aaa34e8,SKIP, 6cf647a0dfd696580c1d841e245d563beca451dd,SKIP, 6e376b900e125f71a71fd2a25c3ad08057b97f73,SKIP, +6f36c79c2fd0aadb204aed5a8f2459edfe153907,SKIP, 719993e0fe2b132b75a3689267ae4adff364b6aa,SKIP, 71ed7033675149956de855b6782e1e22fc908dc8,SKIP, 7242650afd466df511ba2d4cfa34f6d082cb1004,SKIP, @@ -137,10 +146,12 @@ hexsha,action,jira_id 7ea18e20680e86c200cbebc885ff91cfc1f72fac,SKIP, 80971f83d307ab661d830f1a2196729411873906,SKIP, 80d1f62cf7eaaeea569fe5a2e4a91fc270e7bc1f,SKIP, +825bdfb30413f205306debc14b120f1d33b52cc1,REVERT,HBASE-24713 829e6383d52e7a98947a4b2bdaa0b7e756bc6bfc,SKIP, 834488d435fb59d5cb2b0ed7f09b8b1e70d7e327,SKIP, 86242e1f55da7df6a2119389897d11356e6bbc2a,SKIP, 8670fb3339accf149d098552f523e9c14b90c941,SKIP, +87ce2cff979df88eed3ac2e530068fe2506a6fb6,SKIP, 880c7c35fc50f28ec3e072a4c62a348fc964e9e0,SKIP, 88ff206c57fac513b3c5442fd4369ced416279da,SKIP, 8aa1214a1722ba491d52cbbfab1b39cbd0eddeea,SKIP, @@ -149,6 +160,7 @@ hexsha,action,jira_id 8ef87ce4343e80321fcfd99594372759557c90f2,SKIP, 9213d7194ede5b723bc817a9bb634679ee3ce5c1,SKIP, 930f68c0b976a600066b838283a0f3dce050256f,SKIP, +94a03d7ae2ba2986fd359720704b88808d50f623,ADD,HBASE-24713 962d7e9bf06f4e2e569ba34acae6203b4deef778,ADD,HBASE-19074 97d7b3572cc661a8d31f82b9c567d7a75b9eef95,SKIP, 99e18fed23a2a476514fa4bd500b07a8d913e330,SKIP, @@ -163,7 +175,9 @@ a05cef75c4b33171ab29d89d0fbb0fbbc11d6d39,SKIP, a312705dbc8e6d604adcc874526294c72b8ff580,SKIP, a67481209f5d315f06e3a6910fa44493e398210f,REVERT,HBASE-16840 a72d40694116d84454f480c961c1cc1f5d7e1deb,SKIP, +a77829d5b7d627e904d13b9ffce41044b56d0feb,SKIP, a80799a3bc73513393f764df330704ad688140e8,SKIP, +aa5b28a7797564e021dd57626bebe911ad5da727,SKIP, aa8a9997792b686a606e8ada2cd34fb9ad895bc0,SKIP, aaeb488f43a9e79655275ddb481ba970b49d1173,SKIP, ac9035db199902533c07d80f384ae29c115d3ad5,SKIP, @@ -171,11 +185,17 @@ ad2064d8a5ff57d021852c3210a30c5f58eaa43c,SKIP, ad885a0baae21b943ffebef168c65650f8317023,SKIP, adec117e47a2ca503458954d6877667d877890fd,SKIP, ae95b1f215a120890de5454739651911749057ca,SKIP, +af1fa22e4dc824f8cb73ed682ee7c94fbae7a1c8,SKIP, +b0863c5832024033bc13efa3edb7c57b3b753996,SKIP, +b0863c5832024033bc13efa3edb7c57b3b753996,SKIP, b182030d48dcc89d8c26b98f2a58d7909957ea49,SKIP, +b33c200a28d6f26e68e3e2e651b7da463f030dc2,SKIP, b3d55441b8174c704ada4585603f6bcfca298843,SKIP, +b44cf90220ad58ab21852e451e505d4342ca022d,SKIP, b65231d04dbc565a578ce928e809aa51f5439857,SKIP, b6549007b313e8f3aa993d5c1ebd29c84ccb7b7b,SKIP, b6d4fc955fe0fc41f5225f1cc2e3e4b92029251c,SKIP, +b78f4367f710a8cb2b3df37ba158604e530301dc,SKIP, b9c676cdc048c52f927cfa906fd18ff412e4ca20,SKIP, b9f5c6b065ebd572193c1fdc9d38557320b42fe6,SKIP, bcadcef21048e4764f7ae8dec3ce52884f20c02c,SKIP, @@ -184,7 +204,9 @@ bd2c03dc7df600fe481ba7f2fed958deb18f5291,SKIP, bd4e14db07ea32a45c3ef734e06d195a405da67c,SKIP, bd4eba2b53b7af738fd9584511d737c4393d0855,SKIP, bef0616ef33306afca3060b96c2cba5f9762035d,SKIP, 
+c03ec837e70ebf014aabd8610d5fe4d53b239efa,SKIP, c100fb835a54be6002fe9704349e726f27b15b7a,SKIP, +c40b4781e4ae49308d5ac037364772de75f4f4e2,SKIP, c5e0a1397b3c6a14612e4c5b66f995c02de4310b,SKIP, c71da858ada94e1b93065f0b7caf3558942bc4da,SKIP, c89cfd3406823cf05fa83464c5ddee16bf0d473f,ADD,HBASE-17248 @@ -201,6 +223,7 @@ ce6a6014daded424d9460f7de4eadae169f52683,SKIP, cf1ccc30909bfb04326415e5a648605759d57360,SKIP, cf45c8d30a4d9810cd676b2a1a348141c4e27eeb,SKIP, d14e335edc9c22c30827bc75e73b5303ca64ee0d,SKIP, +d2c1886bf4df5746c05af7bc9b82715ead0b9d8e,ADD,HBASE-25450 d32230d0b5a4706b625cc7ac7ee7d28f44bd7b85,SKIP, d524768528cd15151ba1ebb82e32609da5308128,SKIP, d5a1b276270a1d41f21badd5b85d9502f8f9f415,SKIP, @@ -208,6 +231,7 @@ d6e85b0511396b3221cc7f495eaee5bbacc42afd,SKIP, d91908b0d46156fa364ba11d476b9cdbc01d0411,SKIP, da619282469c65dcf6bee06783c4246a24a1517c,SKIP, da8bcabb99ee5a9a35efd114aa45292616ca3c70,SKIP, +db7ad07081343df040b7d41b8881155257a02db5,SKIP, dfb1af48927a66aa5baa5b182e84327770b3c6c9,SKIP, e075492b4dac5c347b7f6b2e5318e2967b95b18b,SKIP, e08277ac8fe466bf63f6fc342256ab7b8d41243a,SKIP, @@ -218,11 +242,14 @@ e40fcee6b54712b76d702af6937c3320c60df2b9,SKIP, e501fe1a296be8fec0890e7e15414683aa3d933b,SKIP, e5349d589c000e395e12340e003aa9e2153afea6,SKIP, e5fb8214b2bfd6396539a4e8b6cf5f3cc5e9c06f,REVERT,HBASE-21874 +e67d7516ec4b4be0f0d9258af9f8c714b0bb7c58,SKIP, e869a20123afe326e198d35d110f5c0360ea244f,SKIP, e8e45ef8f2fb91a870399636b492d5cee58a4c39,SKIP, e92a147e1961366e36a39577816994566e1e21c5,SKIP, eacf3cb29641af1a68978d9bd7654f643a3aa3a1,SKIP, ec251bdd3649de7f30ece914c7930498e642527e,SKIP, +ec39d59161790d70e0b850b90dbd4101c5b6f895,SKIP, +ec39d59161790d70e0b850b90dbd4101c5b6f895,SKIP, ec39dc8c149b9f89a91596d57d27de812973f0a9,SKIP, ed520133d6dbb47a40f1883a56460582732f863a,SKIP, ed62e08786273587378b86278fae452dfc817dfb,SKIP, @@ -232,11 +259,15 @@ f0b1c4279eaf09d255336d1de9c2bc2b5d726e70,SKIP, f4acc47e2debb3d3d87c05436d940ef2fdfe0be3,SKIP, f6095adea64912deaebfaf2a6a5881b820d315b2,SKIP, f61f02b2b24af39545cc2754cfbc25122da60651,SKIP, +f66c80b6a655a6a39cdaba1af50918abcefff303,SKIP, f6d6bf59faa2a4a0767480af7658e4a844fd186f,SKIP, +f7bc7be1eb1ae7cd8ab09754845480e32a509384,SKIP, fab0b2e60385fca20021f74335a9c3d36368f621,SKIP, fb9be046aefb2e0b6e832dd00bc44a38ee62ab1f,SKIP, fc2ef413fab50d4375318fbd667051fd02f085f2,SKIP, fd5c5fb3887914183a1510f5972e50d9365e02f5,SKIP, +fd7beffcf92e1f435f4fd4aafb98057f067e9de4,SKIP, fe84833ea22c30b68022203132706ebb1e526852,SKIP, fe9e7483a316df9f5a62e9c215bcedcfd65c5f12,SKIP, ffcd4d424f69b4ecac1bd9f5980c14bb4b61a3fa,ADD,HBASE-13796 +ffeed7c6648391f02fd97d1da1fe4d210398437e,SKIP, From 45ef88dc8d449cd68284e6990c33859ddaa16b4e Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 8 Jan 2021 17:50:08 +0800 Subject: [PATCH 312/769] HBASE-25471 Move RegionScannerImpl out of HRegion (#2853) Signed-off-by: Guanghao Zhang --- .../hadoop/hbase/regionserver/HRegion.java | 754 +---------------- .../hbase/regionserver/RSRpcServices.java | 1 - .../hbase/regionserver/RegionScannerImpl.java | 782 ++++++++++++++++++ .../ReversedRegionScannerImpl.java | 13 +- .../TestTransitRegionStateProcedure.java | 4 +- .../hbase/regionserver/TestHRegion.java | 3 +- .../regionserver/TestScanWithBloomError.java | 3 +- .../TestScannerHeartbeatMessages.java | 5 +- .../regionserver/TestSwitchToStreamRead.java | 6 +- .../hbase/regionserver/TestWideScanner.java | 2 +- 10 files changed, 802 insertions(+), 771 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java 
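A note for reviewers before the diff itself: RegionScannerImpl, the class this commit extracts from HRegion, is the component that merges one scanner per Store (column family) into a single row-ordered stream, driving a KeyValueHeap over the individual StoreScanners (see the removed javadoc reproduced further down). The following is a minimal, illustrative sketch of that heap-merge pattern only; the names MergingScanner and StoreScannerish are invented for the example and are not the HBase API.

// Illustrative sketch only, not part of the patch and not HBase code.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class RegionScannerSketch {
  /** Simplified stand-in for a per-store (column family) scanner. */
  interface StoreScannerish {
    String peek(); // next row key this store would emit, or null when exhausted
    String next(); // consume and return the current row key
  }

  /** Merges store scanners in row-key order, as a region-level scanner must. */
  static final class MergingScanner {
    private final PriorityQueue<StoreScannerish> heap =
        new PriorityQueue<>(Comparator.comparing(StoreScannerish::peek));

    MergingScanner(List<StoreScannerish> scanners) {
      for (StoreScannerish s : scanners) {
        if (s.peek() != null) { // never put an exhausted scanner on the heap
          heap.add(s);
        }
      }
    }

    /** Returns the globally smallest next row key across all stores, or null. */
    String next() {
      StoreScannerish top = heap.poll();
      if (top == null) {
        return null;
      }
      String result = top.next();
      if (top.peek() != null) {
        heap.add(top); // re-insert so the heap re-orders on the new peek()
      }
      return result;
    }
  }

  /** Trivial in-memory store scanner for the demo. */
  static StoreScannerish of(String... rows) {
    Iterator<String> it = new ArrayList<>(List.of(rows)).iterator();
    return new StoreScannerish() {
      String current = it.hasNext() ? it.next() : null;
      @Override public String peek() { return current; }
      @Override public String next() {
        String r = current;
        current = it.hasNext() ? it.next() : null;
        return r;
      }
    };
  }

  public static void main(String[] args) {
    MergingScanner scanner = new MergingScanner(
        List.of(of("row1", "row4"), of("row2", "row3", "row5")));
    for (String row = scanner.next(); row != null; row = scanner.next()) {
      System.out.println(row); // prints row1..row5 in order
    }
  }
}

The point of the pattern: each consumed key re-inserts its source scanner so the heap re-orders on the scanner's new head, keeping every next() call at O(log n) in the number of stores regardless of how many rows each store holds.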
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 4ec61ac5c051..3b32f46ed044 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -30,7 +30,6 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.text.ParseException; -import java.util.AbstractList; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -100,7 +99,6 @@ import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagUtil; -import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.CheckAndMutateResult; @@ -112,7 +110,6 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -132,14 +129,11 @@ import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.FilterWrapper; -import org.apache.hadoop.hbase.filter.IncompatibleFilterException; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -148,8 +142,6 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry; -import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker; @@ -395,7 +387,7 @@ public void setRestoredRegion(boolean restoredRegion) { static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L; final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool(); - private final ConcurrentHashMap scannerReadPoints; + final ConcurrentHashMap scannerReadPoints; /** * The sequence ID that was enLongAddered when this region was opened. 
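The hunk above widens scannerReadPoints from private to package-private: once RegionScannerImpl lives outside HRegion, the scanner must still register its MVCC read point in this map (and remove itself on close) so that getSmallestReadPoint keeps every in-flight scanner's data alive. A rough sketch of that registration protocol, with invented Region and Scanner stand-ins rather than the real HBase types:

import java.util.concurrent.ConcurrentHashMap;

// Invented stand-ins for illustration; not the HBase classes.
class Region {
  // Package-private, like scannerReadPoints after this hunk, so a scanner
  // class in the same package can reach it directly.
  final ConcurrentHashMap<Scanner, Long> scannerReadPoints = new ConcurrentHashMap<>();

  long smallestReadPoint(long currentReadPoint) {
    // Data at or above the smallest registered read point must be retained.
    long min = currentReadPoint;
    for (long readPt : scannerReadPoints.values()) {
      min = Math.min(min, readPt);
    }
    return min;
  }
}

class Scanner implements AutoCloseable {
  private final Region region;
  final long readPt;

  Scanner(Region region, long requestedReadPoint) {
    this.region = region;
    // Synchronize so nobody computes the smallest read point between the
    // moment we pick ours and the moment we publish it; this mirrors the
    // comment kept verbatim in the moved constructor.
    synchronized (region.scannerReadPoints) {
      this.readPt = requestedReadPoint > 0 ? requestedReadPoint : 1L;
      region.scannerReadPoints.put(this, this.readPt);
    }
  }

  @Override
  public void close() {
    // Deregister so the read point no longer pins data, as close() does below.
    region.scannerReadPoints.remove(this);
  }
}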
@@ -904,8 +896,8 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co Pair retainedRWRequestsCnt = rsServices.getRegionServerAccounting() .getRetainedRegionRWRequestsCnt().get(getRegionInfo().getEncodedName()); if (retainedRWRequestsCnt != null) { - this.setReadRequestsCount(retainedRWRequestsCnt.getFirst()); - this.setWriteRequestsCount(retainedRWRequestsCnt.getSecond()); + this.addReadRequestsCount(retainedRWRequestsCnt.getFirst()); + this.addWriteRequestsCount(retainedRWRequestsCnt.getSecond()); // remove them since won't use again rsServices.getRegionServerAccounting().getRetainedRegionRWRequestsCnt() .remove(getRegionInfo().getEncodedName()); @@ -3160,12 +3152,12 @@ private RegionScannerImpl getScanner(Scan scan, List additional } protected RegionScannerImpl instantiateRegionScanner(Scan scan, - List additionalScanners, long nonceGroup, long nonce) throws IOException { + List additionalScanners, long nonceGroup, long nonce) throws IOException { if (scan.isReversed()) { if (scan.getFilter() != null) { scan.getFilter().setReversed(true); } - return new ReversedRegionScannerImpl(scan, additionalScanners, this); + return new ReversedRegionScannerImpl(scan, additionalScanners, this, nonceGroup, nonce); } return new RegionScannerImpl(scan, additionalScanners, this, nonceGroup, nonce); } @@ -4039,7 +4031,7 @@ private List reckonDeltasByStore(HStore store, Mutation mutation, long now // Sort the cells so that they match the order that they appear in the Get results. // Otherwise, we won't be able to find the existing values if the cells are not specified // in order by the client since cells are in an array list. - sort(deltas, store.getComparator()); + deltas.sort(store.getComparator()); // Get previous values for all columns in this family. Get get = new Get(mutation.getRow()); @@ -7086,702 +7078,6 @@ public String toString() { return getRegionInfo().getRegionNameAsString(); } - /** - * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families). 
- */ - class RegionScannerImpl - implements RegionScanner, Shipper, org.apache.hadoop.hbase.ipc.RpcCallback { - // Package local for testability - KeyValueHeap storeHeap = null; - /** Heap of key-values that are not essential for the provided filters and are thus read - * on demand, if on-demand column family loading is enabled.*/ - KeyValueHeap joinedHeap = null; - /** - * If the joined heap data gathering is interrupted due to scan limits, this will - * contain the row for which we are populating the values.*/ - protected Cell joinedContinuationRow = null; - private boolean filterClosed = false; - - protected final byte[] stopRow; - protected final boolean includeStopRow; - protected final HRegion region; - protected final CellComparator comparator; - - private final long readPt; - private final long maxResultSize; - private final ScannerContext defaultScannerContext; - private final FilterWrapper filter; - - @Override - public RegionInfo getRegionInfo() { - return region.getRegionInfo(); - } - - RegionScannerImpl(Scan scan, List additionalScanners, HRegion region) - throws IOException { - this(scan, additionalScanners, region, HConstants.NO_NONCE, HConstants.NO_NONCE); - } - - RegionScannerImpl(Scan scan, List additionalScanners, HRegion region, - long nonceGroup, long nonce) throws IOException { - this.region = region; - this.maxResultSize = scan.getMaxResultSize(); - if (scan.hasFilter()) { - this.filter = new FilterWrapper(scan.getFilter()); - } else { - this.filter = null; - } - this.comparator = region.getCellComparator(); - /** - * By default, calls to next/nextRaw must enforce the batch limit. Thus, construct a default - * scanner context that can be used to enforce the batch limit in the event that a - * ScannerContext is not specified during an invocation of next/nextRaw - */ - defaultScannerContext = ScannerContext.newBuilder() - .setBatchLimit(scan.getBatch()).build(); - this.stopRow = scan.getStopRow(); - this.includeStopRow = scan.includeStopRow(); - - // synchronize on scannerReadPoints so that nobody calculates - // getSmallestReadPoint, before scannerReadPoints is updated. - IsolationLevel isolationLevel = scan.getIsolationLevel(); - long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); - synchronized (scannerReadPoints) { - if (mvccReadPoint > 0) { - this.readPt = mvccReadPoint; - } else if (nonce == HConstants.NO_NONCE || rsServices == null - || rsServices.getNonceManager() == null) { - this.readPt = getReadPoint(isolationLevel); - } else { - this.readPt = rsServices.getNonceManager().getMvccFromOperationContext(nonceGroup, nonce); - } - scannerReadPoints.put(this, this.readPt); - } - initializeScanners(scan, additionalScanners); - } - - protected void initializeScanners(Scan scan, List additionalScanners) - throws IOException { - // Here we separate all scanners into two lists - scanner that provide data required - // by the filter to operate (scanners list) and all others (joinedScanners list). 
- List scanners = new ArrayList<>(scan.getFamilyMap().size()); - List joinedScanners = new ArrayList<>(scan.getFamilyMap().size()); - // Store all already instantiated scanners for exception handling - List instantiatedScanners = new ArrayList<>(); - // handle additionalScanners - if (additionalScanners != null && !additionalScanners.isEmpty()) { - scanners.addAll(additionalScanners); - instantiatedScanners.addAll(additionalScanners); - } - - try { - for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { - HStore store = stores.get(entry.getKey()); - KeyValueScanner scanner = store.getScanner(scan, entry.getValue(), this.readPt); - instantiatedScanners.add(scanner); - if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand() - || this.filter.isFamilyEssential(entry.getKey())) { - scanners.add(scanner); - } else { - joinedScanners.add(scanner); - } - } - initializeKVHeap(scanners, joinedScanners, region); - } catch (Throwable t) { - throw handleException(instantiatedScanners, t); - } - } - - protected void initializeKVHeap(List scanners, - List joinedScanners, HRegion region) - throws IOException { - this.storeHeap = new KeyValueHeap(scanners, comparator); - if (!joinedScanners.isEmpty()) { - this.joinedHeap = new KeyValueHeap(joinedScanners, comparator); - } - } - - private IOException handleException(List instantiatedScanners, - Throwable t) { - // remove scaner read point before throw the exception - scannerReadPoints.remove(this); - if (storeHeap != null) { - storeHeap.close(); - storeHeap = null; - if (joinedHeap != null) { - joinedHeap.close(); - joinedHeap = null; - } - } else { - // close all already instantiated scanners before throwing the exception - for (KeyValueScanner scanner : instantiatedScanners) { - scanner.close(); - } - } - return t instanceof IOException ? (IOException) t : new IOException(t); - } - - @Override - public long getMaxResultSize() { - return maxResultSize; - } - - @Override - public long getMvccReadPoint() { - return this.readPt; - } - - @Override - public int getBatch() { - return this.defaultScannerContext.getBatchLimit(); - } - - /** - * Reset both the filter and the old filter. - * - * @throws IOException in case a filter raises an I/O exception. - */ - protected void resetFilters() throws IOException { - if (filter != null) { - filter.reset(); - } - } - - @Override - public boolean next(List outResults) - throws IOException { - // apply the batching limit by default - return next(outResults, defaultScannerContext); - } - - @Override - public synchronized boolean next(List outResults, ScannerContext scannerContext) - throws IOException { - if (this.filterClosed) { - throw new UnknownScannerException("Scanner was closed (timed out?) " + - "after we renewed it. Could be caused by a very slow scanner " + - "or a lengthy garbage collection"); - } - startRegionOperation(Operation.SCAN); - try { - return nextRaw(outResults, scannerContext); - } finally { - closeRegionOperation(Operation.SCAN); - } - } - - @Override - public boolean nextRaw(List outResults) throws IOException { - // Use the RegionScanner's context by default - return nextRaw(outResults, defaultScannerContext); - } - - @Override - public boolean nextRaw(List outResults, ScannerContext scannerContext) - throws IOException { - if (storeHeap == null) { - // scanner is closed - throw new UnknownScannerException("Scanner was closed"); - } - boolean moreValues = false; - if (outResults.isEmpty()) { - // Usually outResults is empty. 
This is true when next is called - // to handle scan or get operation. - moreValues = nextInternal(outResults, scannerContext); - } else { - List tmpList = new ArrayList<>(); - moreValues = nextInternal(tmpList, scannerContext); - outResults.addAll(tmpList); - } - - if (!outResults.isEmpty()) { - readRequestsCount.increment(); - if (metricsRegion != null) { - metricsRegion.updateReadRequestCount(); - } - } - if (rsServices != null && rsServices.getMetrics() != null) { - rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable()); - } - - // If the size limit was reached it means a partial Result is being returned. Returning a - // partial Result means that we should not reset the filters; filters should only be reset in - // between rows - if (!scannerContext.mayHaveMoreCellsInRow()) { - resetFilters(); - } - - if (isFilterDoneInternal()) { - moreValues = false; - } - return moreValues; - } - - /** - * @return true if more cells exist after this batch, false if scanner is done - */ - private boolean populateFromJoinedHeap(List results, ScannerContext scannerContext) - throws IOException { - assert joinedContinuationRow != null; - boolean moreValues = populateResult(results, this.joinedHeap, scannerContext, - joinedContinuationRow); - - if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - // We are done with this row, reset the continuation. - joinedContinuationRow = null; - } - // As the data is obtained from two independent heaps, we need to - // ensure that result list is sorted, because Result relies on that. - sort(results, comparator); - return moreValues; - } - - /** - * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is - * reached, or remainingResultSize (if not -1) is reaced - * @param heap KeyValueHeap to fetch data from.It must be positioned on correct row before call. - * @param scannerContext - * @param currentRowCell - * @return state of last call to {@link KeyValueHeap#next()} - */ - private boolean populateResult(List results, KeyValueHeap heap, - ScannerContext scannerContext, Cell currentRowCell) throws IOException { - Cell nextKv; - boolean moreCellsInRow = false; - boolean tmpKeepProgress = scannerContext.getKeepProgress(); - // Scanning between column families and thus the scope is between cells - LimitScope limitScope = LimitScope.BETWEEN_CELLS; - do { - // Check for thread interrupt status in case we have been signaled from - // #interruptRegionOperation. - checkInterrupt(); - - // We want to maintain any progress that is made towards the limits while scanning across - // different column families. To do this, we toggle the keep progress flag on during calls - // to the StoreScanner to ensure that any progress made thus far is not wiped away. - scannerContext.setKeepProgress(true); - heap.next(results, scannerContext); - scannerContext.setKeepProgress(tmpKeepProgress); - - nextKv = heap.peek(); - moreCellsInRow = moreCellsInRow(nextKv, currentRowCell); - if (!moreCellsInRow) incrementCountOfRowsScannedMetric(scannerContext); - if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) { - return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); - } else if (scannerContext.checkSizeLimit(limitScope)) { - ScannerContext.NextState state = - moreCellsInRow ? 
NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED; - return scannerContext.setScannerState(state).hasMoreValues(); - } else if (scannerContext.checkTimeLimit(limitScope)) { - ScannerContext.NextState state = - moreCellsInRow ? NextState.TIME_LIMIT_REACHED_MID_ROW : NextState.TIME_LIMIT_REACHED; - return scannerContext.setScannerState(state).hasMoreValues(); - } - } while (moreCellsInRow); - return nextKv != null; - } - - /** - * Based on the nextKv in the heap, and the current row, decide whether or not there are more - * cells to be read in the heap. If the row of the nextKv in the heap matches the current row - * then there are more cells to be read in the row. - * @param nextKv - * @param currentRowCell - * @return true When there are more cells in the row to be read - */ - private boolean moreCellsInRow(final Cell nextKv, Cell currentRowCell) { - return nextKv != null && CellUtil.matchingRows(nextKv, currentRowCell); - } - - /* - * @return True if a filter rules the scanner is over, done. - */ - @Override - public synchronized boolean isFilterDone() throws IOException { - return isFilterDoneInternal(); - } - - private boolean isFilterDoneInternal() throws IOException { - return this.filter != null && this.filter.filterAllRemaining(); - } - - private boolean nextInternal(List results, ScannerContext scannerContext) - throws IOException { - if (!results.isEmpty()) { - throw new IllegalArgumentException("First parameter should be an empty list"); - } - if (scannerContext == null) { - throw new IllegalArgumentException("Scanner context cannot be null"); - } - Optional rpcCall = RpcServer.getCurrentCall(); - - // Save the initial progress from the Scanner context in these local variables. The progress - // may need to be reset a few times if rows are being filtered out so we save the initial - // progress. - int initialBatchProgress = scannerContext.getBatchProgress(); - long initialSizeProgress = scannerContext.getDataSizeProgress(); - long initialHeapSizeProgress = scannerContext.getHeapSizeProgress(); - - // Used to check time limit - LimitScope limitScope = LimitScope.BETWEEN_CELLS; - - // The loop here is used only when at some point during the next we determine - // that due to effects of filters or otherwise, we have an empty row in the result. - // Then we loop and try again. Otherwise, we must get out on the first iteration via return, - // "true" if there's more data to read, "false" if there isn't (storeHeap is at a stop row, - // and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow). - while (true) { - // Starting to scan a new row. Reset the scanner progress according to whether or not - // progress should be kept. - if (scannerContext.getKeepProgress()) { - // Progress should be kept. Reset to initial values seen at start of method invocation. - scannerContext.setProgress(initialBatchProgress, initialSizeProgress, - initialHeapSizeProgress); - } else { - scannerContext.clearProgress(); - } - if (rpcCall.isPresent()) { - // If a user specifies a too-restrictive or too-slow scanner, the - // client might time out and disconnect while the server side - // is still processing the request. We should abort aggressively - // in that case. 
- long afterTime = rpcCall.get().disconnectSince(); - if (afterTime >= 0) { - throw new CallerDisconnectedException( - "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + - this + " after " + afterTime + " ms, since " + - "caller disconnected"); - } - } - - // Check for thread interrupt status in case we have been signaled from - // #interruptRegionOperation. - checkInterrupt(); - - // Let's see what we have in the storeHeap. - Cell current = this.storeHeap.peek(); - - boolean shouldStop = shouldStop(current); - // When has filter row is true it means that the all the cells for a particular row must be - // read before a filtering decision can be made. This means that filters where hasFilterRow - // run the risk of enLongAddering out of memory errors in the case that they are applied to a - // table that has very large rows. - boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); - - // If filter#hasFilterRow is true, partial results are not allowed since allowing them - // would prevent the filters from being evaluated. Thus, if it is true, change the - // scope of any limits that could potentially create partial results to - // LimitScope.BETWEEN_ROWS so that those limits are not reached mid-row - if (hasFilterRow) { - if (LOG.isTraceEnabled()) { - LOG.trace("filter#hasFilterRow is true which prevents partial results from being " - + " formed. Changing scope of limits that may create partials"); - } - scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS); - scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS); - limitScope = LimitScope.BETWEEN_ROWS; - } - - if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { - if (hasFilterRow) { - throw new IncompatibleFilterException( - "Filter whose hasFilterRow() returns true is incompatible with scans that must " + - " stop mid-row because of a limit. ScannerContext:" + scannerContext); - } - return true; - } - - // Check if we were getting data from the joinedHeap and hit the limit. - // If not, then it's main path - getting results from storeHeap. - if (joinedContinuationRow == null) { - // First, check if we are at a stop row. If so, there are no more results. - if (shouldStop) { - if (hasFilterRow) { - filter.filterRowCells(results); - } - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // Check if rowkey filter wants to exclude this row. If so, loop to next. - // Technically, if we hit limits before on this row, we don't need this call. - if (filterRowKey(current)) { - incrementCountOfRowsFilteredMetric(scannerContext); - // early check, see HBASE-16296 - if (isFilterDoneInternal()) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - // Typically the count of rows scanned is incremented inside #populateResult. However, - // here we are filtering a row based purely on its row key, preventing us from calling - // #populateResult. Thus, perform the necessary increment here to rows scanned metric - incrementCountOfRowsScannedMetric(scannerContext); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - results.clear(); - - // Read nothing as the rowkey was filtered, but still need to check time limit - if (scannerContext.checkTimeLimit(limitScope)) { - return true; - } - continue; - } - - // Ok, we are good, let's try to get some results from the main heap. 
- populateResult(results, this.storeHeap, scannerContext, current); - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - if (hasFilterRow) { - throw new IncompatibleFilterException( - "Filter whose hasFilterRow() returns true is incompatible with scans that must " - + " stop mid-row because of a limit. ScannerContext:" + scannerContext); - } - return true; - } - - // Check for thread interrupt status in case we have been signaled from - // #interruptRegionOperation. - checkInterrupt(); - - Cell nextKv = this.storeHeap.peek(); - shouldStop = shouldStop(nextKv); - // save that the row was empty before filters applied to it. - final boolean isEmptyRow = results.isEmpty(); - - // We have the part of the row necessary for filtering (all of it, usually). - // First filter with the filterRow(List). - FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; - if (hasFilterRow) { - ret = filter.filterRowCellsWithRet(results); - - // We don't know how the results have changed after being filtered. Must set progress - // according to contents of results now. - if (scannerContext.getKeepProgress()) { - scannerContext.setProgress(initialBatchProgress, initialSizeProgress, - initialHeapSizeProgress); - } else { - scannerContext.clearProgress(); - } - scannerContext.incrementBatchProgress(results.size()); - for (Cell cell : results) { - scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), - cell.heapSize()); - } - } - - if (isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE || filterRow()) { - incrementCountOfRowsFilteredMetric(scannerContext); - results.clear(); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // This row was totally filtered out, if this is NOT the last row, - // we should continue on. Otherwise, nothing else to do. - if (!shouldStop) { - // Read nothing as the cells was filtered, but still need to check time limit - if (scannerContext.checkTimeLimit(limitScope)) { - return true; - } - continue; - } - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // Ok, we are done with storeHeap for this row. - // Now we may need to fetch additional, non-essential data into row. - // These values are not needed for filter to work, so we postpone their - // fetch to (possibly) reduce amount of data loads from disk. - if (this.joinedHeap != null) { - boolean mayHaveData = joinedHeapMayHaveData(current); - if (mayHaveData) { - joinedContinuationRow = current; - populateFromJoinedHeap(results, scannerContext); - - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - return true; - } - } - } - } else { - // Populating from the joined heap was stopped by limits, populate some more. - populateFromJoinedHeap(results, scannerContext); - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - return true; - } - } - // We may have just called populateFromJoinedMap and hit the limits. If that is - // the case, we need to call it again on the next next() invocation. - if (joinedContinuationRow != null) { - return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); - } - - // Finally, we are done with both joinedHeap and storeHeap. - // Double check to prevent empty rows from appearing in result. It could be - // the case when SingleColumnValueExcludeFilter is used. 
- if (results.isEmpty()) { - incrementCountOfRowsFilteredMetric(scannerContext); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - if (!shouldStop) continue; - } - - if (shouldStop) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } else { - return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); - } - } - } - - protected void incrementCountOfRowsFilteredMetric(ScannerContext scannerContext) { - filteredReadRequestsCount.increment(); - if (metricsRegion != null) { - metricsRegion.updateFilteredRecords(); - } - - if (scannerContext == null || !scannerContext.isTrackingMetrics()) return; - - scannerContext.getMetrics().countOfRowsFiltered.incrementAndGet(); - } - - protected void incrementCountOfRowsScannedMetric(ScannerContext scannerContext) { - if (scannerContext == null || !scannerContext.isTrackingMetrics()) return; - - scannerContext.getMetrics().countOfRowsScanned.incrementAndGet(); - } - - /** - * @param currentRowCell - * @return true when the joined heap may have data for the current row - * @throws IOException - */ - private boolean joinedHeapMayHaveData(Cell currentRowCell) - throws IOException { - Cell nextJoinedKv = joinedHeap.peek(); - boolean matchCurrentRow = - nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); - boolean matchAfterSeek = false; - - // If the next value in the joined heap does not match the current row, try to seek to the - // correct row - if (!matchCurrentRow) { - Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); - boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); - matchAfterSeek = - seekSuccessful && joinedHeap.peek() != null - && CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); - } - - return matchCurrentRow || matchAfterSeek; - } - - /** - * This function is to maintain backward compatibility for 0.94 filters. HBASE-6429 combines - * both filterRow & filterRow({@code List kvs}) functions. While 0.94 code or older, - * it may not implement hasFilterRow as HBase-6429 expects because 0.94 hasFilterRow() only - * returns true when filterRow({@code List kvs}) is overridden not the filterRow(). - * Therefore, the filterRow() will be skipped. - */ - private boolean filterRow() throws IOException { - // when hasFilterRow returns true, filter.filterRow() will be called automatically inside - // filterRowCells(List kvs) so we skip that scenario here. - return filter != null && (!filter.hasFilterRow()) - && filter.filterRow(); - } - - private boolean filterRowKey(Cell current) throws IOException { - return filter != null && filter.filterRowKey(current); - } - - protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { - assert this.joinedContinuationRow == null: "Trying to go to next row during joinedHeap read."; - Cell next; - while ((next = this.storeHeap.peek()) != null && - CellUtil.matchingRows(next, curRowCell)) { - // Check for thread interrupt status in case we have been signaled from - // #interruptRegionOperation. 
- checkInterrupt(); - this.storeHeap.next(MOCKED_LIST); - } - resetFilters(); - - // Calling the hook in CP which allows it to do a fast forward - return this.region.getCoprocessorHost() == null - || this.region.getCoprocessorHost() - .postScannerFilterRow(this, curRowCell); - } - - protected boolean shouldStop(Cell currentRowCell) { - if (currentRowCell == null) { - return true; - } - if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_END_ROW)) { - return false; - } - int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length); - return c > 0 || (c == 0 && !includeStopRow); - } - - @Override - public synchronized void close() { - if (storeHeap != null) { - storeHeap.close(); - storeHeap = null; - } - if (joinedHeap != null) { - joinedHeap.close(); - joinedHeap = null; - } - // no need to synchronize here. - scannerReadPoints.remove(this); - this.filterClosed = true; - } - - KeyValueHeap getStoreHeapForTesting() { - return storeHeap; - } - - @Override - public synchronized boolean reseek(byte[] row) throws IOException { - if (row == null) { - throw new IllegalArgumentException("Row cannot be null."); - } - boolean result = false; - startRegionOperation(); - Cell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); - try { - // use request seek to make use of the lazy seek option. See HBASE-5520 - result = this.storeHeap.requestSeek(kv, true, true); - if (this.joinedHeap != null) { - result = this.joinedHeap.requestSeek(kv, true, true) || result; - } - } finally { - closeRegionOperation(); - } - return result; - } - - @Override - public void shipped() throws IOException { - if (storeHeap != null) { - storeHeap.shipped(); - } - if (joinedHeap != null) { - joinedHeap.shipped(); - } - } - - @Override - public void run() throws IOException { - // This is the RPC callback method executed. We do the close in of the scanner in this - // callback - this.close(); - } - } - // Utility methods /** * A utility method to create new instances of HRegion based on the {@link HConstants#REGION_IMPL} @@ -8661,14 +7957,6 @@ private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, Listcells using comparator - */ - private static List sort(List cells, final CellComparator comparator) { - cells.sort(comparator); - return cells; - } - public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HRegion.class, false); // woefully out of date - currently missing: @@ -9067,32 +8355,6 @@ private boolean shouldSyncWAL() { return regionDurability.ordinal() > Durability.ASYNC_WAL.ordinal(); } - /** - * A mocked list implementation - discards all updates. 
- */ - private static final List MOCKED_LIST = new AbstractList() { - - @Override - public void add(int index, Cell element) { - // do nothing - } - - @Override - public boolean addAll(int index, Collection c) { - return false; // this list is never changed as a result of an update - } - - @Override - public KeyValue get(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public int size() { - return 0; - } - }; - /** @return the latest sequence number that was read from storage when this region was opened */ public long getOpenSeqNum() { return this.openSeqNum; @@ -9340,11 +8602,11 @@ private static void decorateRegionConfiguration(Configuration conf) { } } - public void setReadRequestsCount(long readRequestsCount) { + public void addReadRequestsCount(long readRequestsCount) { this.readRequestsCount.add(readRequestsCount); } - public void setWriteRequestsCount(long writeRequestsCount) { + public void addWriteRequestsCount(long writeRequestsCount) { this.writeRequestsCount.add(writeRequestsCount); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index f8323c6a1164..587919dac6d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -120,7 +120,6 @@ import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.LeaseManager.Lease; import org.apache.hadoop.hbase.regionserver.LeaseManager.LeaseStillHeldException; import org.apache.hadoop.hbase.regionserver.Region.Operation; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java new file mode 100644 index 000000000000..5d81687cbf45 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -0,0 +1,782 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.IsolationLevel; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.FilterWrapper; +import org.apache.hadoop.hbase.filter.IncompatibleFilterException; +import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcCallback; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.regionserver.Region.Operation; +import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +/** + * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families). + */ +@InterfaceAudience.Private +class RegionScannerImpl implements RegionScanner, Shipper, RpcCallback { + + private static final Logger LOG = LoggerFactory.getLogger(RegionScannerImpl.class); + + // Package local for testability + KeyValueHeap storeHeap = null; + + /** + * Heap of key-values that are not essential for the provided filters and are thus read on demand, + * if on-demand column family loading is enabled. + */ + KeyValueHeap joinedHeap = null; + + /** + * If the joined heap data gathering is interrupted due to scan limits, this will contain the row + * for which we are populating the values. 
+ */ + protected Cell joinedContinuationRow = null; + private boolean filterClosed = false; + + protected final byte[] stopRow; + protected final boolean includeStopRow; + protected final HRegion region; + protected final CellComparator comparator; + + private final ConcurrentHashMap scannerReadPoints; + + private final long readPt; + private final long maxResultSize; + private final ScannerContext defaultScannerContext; + private final FilterWrapper filter; + + private RegionServerServices rsServices; + + @Override + public RegionInfo getRegionInfo() { + return region.getRegionInfo(); + } + + private static boolean hasNonce(HRegion region, long nonce) { + RegionServerServices rsServices = region.getRegionServerServices(); + return nonce != HConstants.NO_NONCE && rsServices != null && + rsServices.getNonceManager() != null; + } + + RegionScannerImpl(Scan scan, List additionalScanners, HRegion region, + long nonceGroup, long nonce) throws IOException { + this.region = region; + this.maxResultSize = scan.getMaxResultSize(); + if (scan.hasFilter()) { + this.filter = new FilterWrapper(scan.getFilter()); + } else { + this.filter = null; + } + this.comparator = region.getCellComparator(); + /** + * By default, calls to next/nextRaw must enforce the batch limit. Thus, construct a default + * scanner context that can be used to enforce the batch limit in the event that a + * ScannerContext is not specified during an invocation of next/nextRaw + */ + defaultScannerContext = ScannerContext.newBuilder().setBatchLimit(scan.getBatch()).build(); + this.stopRow = scan.getStopRow(); + this.includeStopRow = scan.includeStopRow(); + + // synchronize on scannerReadPoints so that nobody calculates + // getSmallestReadPoint, before scannerReadPoints is updated. + IsolationLevel isolationLevel = scan.getIsolationLevel(); + long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); + this.scannerReadPoints = region.scannerReadPoints; + this.rsServices = region.getRegionServerServices(); + synchronized (scannerReadPoints) { + if (mvccReadPoint > 0) { + this.readPt = mvccReadPoint; + } else if (hasNonce(region, nonce)) { + this.readPt = rsServices.getNonceManager().getMvccFromOperationContext(nonceGroup, nonce); + } else { + this.readPt = region.getReadPoint(isolationLevel); + } + scannerReadPoints.put(this, this.readPt); + } + initializeScanners(scan, additionalScanners); + } + + private void initializeScanners(Scan scan, List additionalScanners) + throws IOException { + // Here we separate all scanners into two lists - scanner that provide data required + // by the filter to operate (scanners list) and all others (joinedScanners list). 
+ List scanners = new ArrayList<>(scan.getFamilyMap().size()); + List joinedScanners = new ArrayList<>(scan.getFamilyMap().size()); + // Store all already instantiated scanners for exception handling + List instantiatedScanners = new ArrayList<>(); + // handle additionalScanners + if (additionalScanners != null && !additionalScanners.isEmpty()) { + scanners.addAll(additionalScanners); + instantiatedScanners.addAll(additionalScanners); + } + + try { + for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { + HStore store = region.getStore(entry.getKey()); + KeyValueScanner scanner = store.getScanner(scan, entry.getValue(), this.readPt); + instantiatedScanners.add(scanner); + if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand() || + this.filter.isFamilyEssential(entry.getKey())) { + scanners.add(scanner); + } else { + joinedScanners.add(scanner); + } + } + initializeKVHeap(scanners, joinedScanners, region); + } catch (Throwable t) { + throw handleException(instantiatedScanners, t); + } + } + + protected void initializeKVHeap(List scanners, + List joinedScanners, HRegion region) throws IOException { + this.storeHeap = new KeyValueHeap(scanners, comparator); + if (!joinedScanners.isEmpty()) { + this.joinedHeap = new KeyValueHeap(joinedScanners, comparator); + } + } + + private IOException handleException(List instantiatedScanners, Throwable t) { + // remove scanner read point before throwing the exception + scannerReadPoints.remove(this); + if (storeHeap != null) { + storeHeap.close(); + storeHeap = null; + if (joinedHeap != null) { + joinedHeap.close(); + joinedHeap = null; + } + } else { + // close all already instantiated scanners before throwing the exception + for (KeyValueScanner scanner : instantiatedScanners) { + scanner.close(); + } + } + return t instanceof IOException ? (IOException) t : new IOException(t); + } + + @Override + public long getMaxResultSize() { + return maxResultSize; + } + + @Override + public long getMvccReadPoint() { + return this.readPt; + } + + @Override + public int getBatch() { + return this.defaultScannerContext.getBatchLimit(); + } + + /** + * Reset both the filter and the old filter. + * @throws IOException in case a filter raises an I/O exception. + */ + protected final void resetFilters() throws IOException { + if (filter != null) { + filter.reset(); + } + } + + @Override + public boolean next(List outResults) throws IOException { + // apply the batching limit by default + return next(outResults, defaultScannerContext); + } + + @Override + public synchronized boolean next(List outResults, ScannerContext scannerContext) + throws IOException { + if (this.filterClosed) { + throw new UnknownScannerException("Scanner was closed (timed out?) " + + "after we renewed it. Could be caused by a very slow scanner " + + "or a lengthy garbage collection"); + } + region.startRegionOperation(Operation.SCAN); + try { + return nextRaw(outResults, scannerContext); + } finally { + region.closeRegionOperation(Operation.SCAN); + } + } + + @Override + public boolean nextRaw(List outResults) throws IOException { + // Use the RegionScanner's context by default + return nextRaw(outResults, defaultScannerContext); + } + + @Override + public boolean nextRaw(List outResults, ScannerContext scannerContext) throws IOException { + if (storeHeap == null) { + // scanner is closed + throw new UnknownScannerException("Scanner was closed"); + } + boolean moreValues = false; + if (outResults.isEmpty()) { + // Usually outResults is empty.
This is true when next is called + // to handle scan or get operation. + moreValues = nextInternal(outResults, scannerContext); + } else { + List tmpList = new ArrayList<>(); + moreValues = nextInternal(tmpList, scannerContext); + outResults.addAll(tmpList); + } + + if (!outResults.isEmpty()) { + region.addReadRequestsCount(1); + if (region.getMetrics() != null) { + region.getMetrics().updateReadRequestCount(); + } + } + if (rsServices != null && rsServices.getMetrics() != null) { + rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable()); + } + + // If the size limit was reached it means a partial Result is being returned. Returning a + // partial Result means that we should not reset the filters; filters should only be reset in + // between rows + if (!scannerContext.mayHaveMoreCellsInRow()) { + resetFilters(); + } + + if (isFilterDoneInternal()) { + moreValues = false; + } + return moreValues; + } + + /** + * @return true if more cells exist after this batch, false if scanner is done + */ + private boolean populateFromJoinedHeap(List results, ScannerContext scannerContext) + throws IOException { + assert joinedContinuationRow != null; + boolean moreValues = + populateResult(results, this.joinedHeap, scannerContext, joinedContinuationRow); + + if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + // We are done with this row, reset the continuation. + joinedContinuationRow = null; + } + // As the data is obtained from two independent heaps, we need to + // ensure that result list is sorted, because Result relies on that. + results.sort(comparator); + return moreValues; + } + + /** + * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is + * reached, or remainingResultSize (if not -1) is reached + * @param heap KeyValueHeap to fetch data from. It must be positioned on correct row before call. + * @return state of last call to {@link KeyValueHeap#next()} + */ + private boolean populateResult(List results, KeyValueHeap heap, + ScannerContext scannerContext, Cell currentRowCell) throws IOException { + Cell nextKv; + boolean moreCellsInRow = false; + boolean tmpKeepProgress = scannerContext.getKeepProgress(); + // Scanning between column families and thus the scope is between cells + LimitScope limitScope = LimitScope.BETWEEN_CELLS; + do { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + // We want to maintain any progress that is made towards the limits while scanning across + // different column families. To do this, we toggle the keep progress flag on during calls + // to the StoreScanner to ensure that any progress made thus far is not wiped away. + scannerContext.setKeepProgress(true); + heap.next(results, scannerContext); + scannerContext.setKeepProgress(tmpKeepProgress); + + nextKv = heap.peek(); + moreCellsInRow = moreCellsInRow(nextKv, currentRowCell); + if (!moreCellsInRow) { + incrementCountOfRowsScannedMetric(scannerContext); + } + if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) { + return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); + } else if (scannerContext.checkSizeLimit(limitScope)) { + ScannerContext.NextState state = + moreCellsInRow ?
NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED; + return scannerContext.setScannerState(state).hasMoreValues(); + } else if (scannerContext.checkTimeLimit(limitScope)) { + ScannerContext.NextState state = + moreCellsInRow ? NextState.TIME_LIMIT_REACHED_MID_ROW : NextState.TIME_LIMIT_REACHED; + return scannerContext.setScannerState(state).hasMoreValues(); + } + } while (moreCellsInRow); + return nextKv != null; + } + + /** + * Based on the nextKv in the heap, and the current row, decide whether or not there are more + * cells to be read in the heap. If the row of the nextKv in the heap matches the current row then + * there are more cells to be read in the row. + * @return true When there are more cells in the row to be read + */ + private boolean moreCellsInRow(final Cell nextKv, Cell currentRowCell) { + return nextKv != null && CellUtil.matchingRows(nextKv, currentRowCell); + } + + /** + * @return True if a filter rules the scanner is over, done. + */ + @Override + public synchronized boolean isFilterDone() throws IOException { + return isFilterDoneInternal(); + } + + private boolean isFilterDoneInternal() throws IOException { + return this.filter != null && this.filter.filterAllRemaining(); + } + + private void checkClientDisconnect(Optional rpcCall) throws CallerDisconnectedException { + if (rpcCall.isPresent()) { + // If a user specifies a too-restrictive or too-slow scanner, the + // client might time out and disconnect while the server side + // is still processing the request. We should abort aggressively + // in that case. + long afterTime = rpcCall.get().disconnectSince(); + if (afterTime >= 0) { + throw new CallerDisconnectedException( + "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + this + + " after " + afterTime + " ms, since " + "caller disconnected"); + } + } + } + + private void resetProgress(ScannerContext scannerContext, int initialBatchProgress, + long initialSizeProgress, long initialHeapSizeProgress) { + // Starting to scan a new row. Reset the scanner progress according to whether or not + // progress should be kept. + if (scannerContext.getKeepProgress()) { + // Progress should be kept. Reset to initial values seen at start of method invocation. + scannerContext.setProgress(initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + } else { + scannerContext.clearProgress(); + } + } + + private boolean nextInternal(List results, ScannerContext scannerContext) + throws IOException { + Preconditions.checkArgument(results.isEmpty(), "First parameter should be an empty list"); + Preconditions.checkArgument(scannerContext != null, "Scanner context cannot be null"); + Optional rpcCall = RpcServer.getCurrentCall(); + + // Save the initial progress from the Scanner context in these local variables. The progress + // may need to be reset a few times if rows are being filtered out so we save the initial + // progress. + int initialBatchProgress = scannerContext.getBatchProgress(); + long initialSizeProgress = scannerContext.getDataSizeProgress(); + long initialHeapSizeProgress = scannerContext.getHeapSizeProgress(); + + // Used to check time limit + LimitScope limitScope = LimitScope.BETWEEN_CELLS; + + // The loop here is used only when at some point during the next we determine + // that due to effects of filters or otherwise, we have an empty row in the result. + // Then we loop and try again. 
Otherwise, we must get out on the first iteration via return, + // "true" if there's more data to read, "false" if there isn't (storeHeap is at a stop row, + // and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow). + while (true) { + resetProgress(scannerContext, initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + checkClientDisconnect(rpcCall); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + // Let's see what we have in the storeHeap. + Cell current = this.storeHeap.peek(); + + boolean shouldStop = shouldStop(current); + // When hasFilterRow is true it means that all the cells for a particular row must be + // read before a filtering decision can be made. This means that filters where hasFilterRow + // run the risk of encountering out of memory errors in the case that they are applied to a + // table that has very large rows. + boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); + + // If filter#hasFilterRow is true, partial results are not allowed since allowing them + // would prevent the filters from being evaluated. Thus, if it is true, change the + // scope of any limits that could potentially create partial results to + // LimitScope.BETWEEN_ROWS so that those limits are not reached mid-row + if (hasFilterRow) { + if (LOG.isTraceEnabled()) { + LOG.trace("filter#hasFilterRow is true which prevents partial results from being " + + " formed. Changing scope of limits that may create partials"); + } + scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS); + scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS); + limitScope = LimitScope.BETWEEN_ROWS; + } + + if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { + if (hasFilterRow) { + throw new IncompatibleFilterException( + "Filter whose hasFilterRow() returns true is incompatible with scans that must " + + " stop mid-row because of a limit. ScannerContext:" + scannerContext); + } + return true; + } + + // Check if we were getting data from the joinedHeap and hit the limit. + // If not, then it's main path - getting results from storeHeap. + if (joinedContinuationRow == null) { + // First, check if we are at a stop row. If so, there are no more results. + if (shouldStop) { + if (hasFilterRow) { + filter.filterRowCells(results); + } + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // Check if rowkey filter wants to exclude this row. If so, loop to next. + // Technically, if we hit limits before on this row, we don't need this call. + if (filterRowKey(current)) { + incrementCountOfRowsFilteredMetric(scannerContext); + // early check, see HBASE-16296 + if (isFilterDoneInternal()) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + // Typically the count of rows scanned is incremented inside #populateResult. However, + // here we are filtering a row based purely on its row key, preventing us from calling + // #populateResult.
Thus, perform the necessary increment here to rows scanned metric + incrementCountOfRowsScannedMetric(scannerContext); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + results.clear(); + + // Read nothing as the rowkey was filtered, but still need to check time limit + if (scannerContext.checkTimeLimit(limitScope)) { + return true; + } + continue; + } + + // Ok, we are good, let's try to get some results from the main heap. + populateResult(results, this.storeHeap, scannerContext, current); + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + if (hasFilterRow) { + throw new IncompatibleFilterException( + "Filter whose hasFilterRow() returns true is incompatible with scans that must " + + " stop mid-row because of a limit. ScannerContext:" + scannerContext); + } + return true; + } + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + Cell nextKv = this.storeHeap.peek(); + shouldStop = shouldStop(nextKv); + // save that the row was empty before filters were applied to it. + final boolean isEmptyRow = results.isEmpty(); + + // We have the part of the row necessary for filtering (all of it, usually). + // First filter with the filterRow(List). + FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; + if (hasFilterRow) { + ret = filter.filterRowCellsWithRet(results); + + // We don't know how the results have changed after being filtered. Must set progress + // according to contents of results now. + if (scannerContext.getKeepProgress()) { + scannerContext.setProgress(initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + } else { + scannerContext.clearProgress(); + } + scannerContext.incrementBatchProgress(results.size()); + for (Cell cell : results) { + scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), + cell.heapSize()); + } + } + + if (isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE || filterRow()) { + incrementCountOfRowsFilteredMetric(scannerContext); + results.clear(); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // This row was totally filtered out. If this is NOT the last row, + // we should continue on. Otherwise, nothing else to do. + if (!shouldStop) { + // Read nothing as the cells were filtered, but still need to check time limit + if (scannerContext.checkTimeLimit(limitScope)) { + return true; + } + continue; + } + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // Ok, we are done with storeHeap for this row. + // Now we may need to fetch additional, non-essential data into row. + // These values are not needed for filter to work, so we postpone their + // fetch to (possibly) reduce amount of data loads from disk. + if (this.joinedHeap != null) { + boolean mayHaveData = joinedHeapMayHaveData(current); + if (mayHaveData) { + joinedContinuationRow = current; + populateFromJoinedHeap(results, scannerContext); + + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + return true; + } + } + } + } else { + // Populating from the joined heap was stopped by limits, populate some more.
+ populateFromJoinedHeap(results, scannerContext); + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + return true; + } + } + // We may have just called populateFromJoinedMap and hit the limits. If that is + // the case, we need to call it again on the next next() invocation. + if (joinedContinuationRow != null) { + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); + } + + // Finally, we are done with both joinedHeap and storeHeap. + // Double check to prevent empty rows from appearing in result. It could be + // the case when SingleColumnValueExcludeFilter is used. + if (results.isEmpty()) { + incrementCountOfRowsFilteredMetric(scannerContext); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + if (!shouldStop) { + continue; + } + } + + if (shouldStop) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } else { + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); + } + } + } + + private void incrementCountOfRowsFilteredMetric(ScannerContext scannerContext) { + region.filteredReadRequestsCount.increment(); + if (region.getMetrics() != null) { + region.getMetrics().updateFilteredRecords(); + } + + if (scannerContext == null || !scannerContext.isTrackingMetrics()) { + return; + } + + scannerContext.getMetrics().countOfRowsFiltered.incrementAndGet(); + } + + private void incrementCountOfRowsScannedMetric(ScannerContext scannerContext) { + if (scannerContext == null || !scannerContext.isTrackingMetrics()) { + return; + } + + scannerContext.getMetrics().countOfRowsScanned.incrementAndGet(); + } + + /** + * @return true when the joined heap may have data for the current row + */ + private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { + Cell nextJoinedKv = joinedHeap.peek(); + boolean matchCurrentRow = + nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); + boolean matchAfterSeek = false; + + // If the next value in the joined heap does not match the current row, try to seek to the + // correct row + if (!matchCurrentRow) { + Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); + boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); + matchAfterSeek = seekSuccessful && joinedHeap.peek() != null && + CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); + } + + return matchCurrentRow || matchAfterSeek; + } + + /** + * This function is to maintain backward compatibility for 0.94 filters. HBASE-6429 combines both + * filterRow & filterRow({@code List kvs}) functions. While 0.94 code or older, it may + * not implement hasFilterRow as HBase-6429 expects because 0.94 hasFilterRow() only returns true + * when filterRow({@code List kvs}) is overridden not the filterRow(). Therefore, the + * filterRow() will be skipped. + */ + private boolean filterRow() throws IOException { + // when hasFilterRow returns true, filter.filterRow() will be called automatically inside + // filterRowCells(List kvs) so we skip that scenario here. + return filter != null && (!filter.hasFilterRow()) && filter.filterRow(); + } + + private boolean filterRowKey(Cell current) throws IOException { + return filter != null && filter.filterRowKey(current); + } + + /** + * A mocked list implementation - discards all updates. 
+ */ + private static final List<Cell> MOCKED_LIST = new AbstractList<Cell>() { + + @Override + public void add(int index, Cell element) { + // do nothing + } + + @Override + public boolean addAll(int index, Collection<? extends Cell> c) { + return false; // this list is never changed as a result of an update + } + + @Override + public KeyValue get(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public int size() { + return 0; + } + }; + + protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { + assert this.joinedContinuationRow == null : "Trying to go to next row during joinedHeap read."; + Cell next; + while ((next = this.storeHeap.peek()) != null && CellUtil.matchingRows(next, curRowCell)) { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + this.storeHeap.next(MOCKED_LIST); + } + resetFilters(); + + // Calling the hook in CP which allows it to do a fast forward + return this.region.getCoprocessorHost() == null || + this.region.getCoprocessorHost().postScannerFilterRow(this, curRowCell); + } + + protected boolean shouldStop(Cell currentRowCell) { + if (currentRowCell == null) { + return true; + } + if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_END_ROW)) { + return false; + } + int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length); + return c > 0 || (c == 0 && !includeStopRow); + } + + @Override + public synchronized void close() { + if (storeHeap != null) { + storeHeap.close(); + storeHeap = null; + } + if (joinedHeap != null) { + joinedHeap.close(); + joinedHeap = null; + } + // no need to synchronize here. + scannerReadPoints.remove(this); + this.filterClosed = true; + } + + @Override + public synchronized boolean reseek(byte[] row) throws IOException { + if (row == null) { + throw new IllegalArgumentException("Row cannot be null."); + } + boolean result = false; + region.startRegionOperation(); + Cell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); + try { + // use request seek to make use of the lazy seek option. See HBASE-5520 + result = this.storeHeap.requestSeek(kv, true, true); + if (this.joinedHeap != null) { + result = this.joinedHeap.requestSeek(kv, true, true) || result; + } + } finally { + region.closeRegionOperation(); + } + return result; + } + + @Override + public void shipped() throws IOException { + if (storeHeap != null) { + storeHeap.shipped(); + } + if (joinedHeap != null) { + joinedHeap.shipped(); + } + } + + @Override + public void run() throws IOException { + // This is the RPC callback method executed.
We close the scanner in this + // callback + this.close(); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java index 3ca064f05101..d1995f237d2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -37,15 +36,9 @@ @InterfaceAudience.Private class ReversedRegionScannerImpl extends RegionScannerImpl { - /** - * @param scan - * @param additionalScanners - * @param region - * @throws IOException - */ - ReversedRegionScannerImpl(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region) - throws IOException { - region.super(scan, additionalScanners, region); + ReversedRegionScannerImpl(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region, + long nonceGroup, long nonce) throws IOException { + super(scan, additionalScanners, region, nonceGroup, nonce); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java index c0b954a97137..c55a9f966b67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java @@ -131,8 +131,8 @@ public void testRecoveryAndDoubleExecutionReopen() throws Exception { UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); HRegionServer rs = UTIL.getRSForFirstRegionInTable(tableName); HRegion region = rs.getRegions(tableName).get(0); - region.setReadRequestsCount(1); - region.setWriteRequestsCount(2); + region.addReadRequestsCount(1); + region.addWriteRequestsCount(2); long openSeqNum = region.getOpenSeqNum(); TransitRegionStateProcedure proc = TransitRegionStateProcedure.reopen(env, region.getRegionInfo()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 58668933c61f..b56f96a51149 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -137,7 +137,6 @@ import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.Region.RowLock; import org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem; @@ -3768,7 +3767,7 @@ public void testGetScanner_WithNoFamilies() throws IOException { region.put(put); Scan scan = null; - HRegion.RegionScannerImpl is = null; + RegionScannerImpl is = null; // Testing
to see how many scanners are produced by getScanner, // starting diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index 174e2489aa71..92f790c95c13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.BloomFilterUtil; @@ -131,7 +130,7 @@ private void scanColSet(int[] colSet, int[] expectedResultCols) Scan scan = new Scan().withStartRow(ROW_BYTES).withStopRow(ROW_BYTES, true); addColumnSetToScan(scan, colSet); RegionScannerImpl scanner = region.getScanner(scan); - KeyValueHeap storeHeap = scanner.getStoreHeapForTesting(); + KeyValueHeap storeHeap = scanner.storeHeap; assertEquals(0, storeHeap.getHeap().size()); StoreScanner storeScanner = (StoreScanner) storeHeap.getCurrentForTesting(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java index 71796105af71..810f0f68256b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; @@ -525,7 +524,7 @@ protected RegionScannerImpl instantiateRegionScanner(Scan scan, private static class HeartbeatReversedRegionScanner extends ReversedRegionScannerImpl { HeartbeatReversedRegionScanner(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region) throws IOException { - super(scan, additionalScanners, region); + super(scan, additionalScanners, region, HConstants.NO_NONCE, HConstants.NO_NONCE); } @Override @@ -554,7 +553,7 @@ protected void initializeKVHeap(List<KeyValueScanner> scanners, private static class HeartbeatRegionScanner extends RegionScannerImpl { HeartbeatRegionScanner(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region) throws IOException { - region.super(scan, additionalScanners, region); + super(scan, additionalScanners, region, HConstants.NO_NONCE, HConstants.NO_NONCE); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java index 11949153d3e2..61a0689bc4af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java @@ -36,7 +36,6 @@ import
org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -102,7 +101,7 @@ public void tearDown() throws IOException { public void test() throws IOException { try (RegionScannerImpl scanner = REGION.getScanner(new Scan())) { StoreScanner storeScanner = - (StoreScanner) (scanner).getStoreHeapForTesting().getCurrentForTesting(); + (StoreScanner) scanner.storeHeap.getCurrentForTesting(); for (KeyValueScanner kvs : storeScanner.getAllScannersForTesting()) { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; @@ -151,8 +150,7 @@ public boolean filterRowKey(Cell cell) throws IOException { private void testFilter(Filter filter) throws IOException { try (RegionScannerImpl scanner = REGION.getScanner(new Scan().setFilter(filter))) { - StoreScanner storeScanner = - (StoreScanner) (scanner).getStoreHeapForTesting().getCurrentForTesting(); + StoreScanner storeScanner = (StoreScanner) scanner.storeHeap.getCurrentForTesting(); for (KeyValueScanner kvs : storeScanner.getAllScannersForTesting()) { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index fca371f12caa..ea32a3330226 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -157,7 +157,7 @@ public void testWideScanBatching() throws IOException { // trigger ChangedReadersObservers Iterator scanners = - ((HRegion.RegionScannerImpl) s).storeHeap.getHeap().iterator(); + ((RegionScannerImpl) s).storeHeap.getHeap().iterator(); while (scanners.hasNext()) { StoreScanner ss = (StoreScanner) scanners.next(); ss.updateReaders(Collections.emptyList(), Collections.emptyList()); From 7d70141b90d188d69e259ad5de435dad4eed5bf8 Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Fri, 8 Jan 2021 18:58:30 +0800 Subject: [PATCH 313/769] HBASE-25434 SlowDelete & SlowPut metric value should use updateDelete & updatePut (#2837) Signed-off-by: Zheng Wang --- .../hbase/regionserver/MetricsRegionServer.java | 12 ++++++------ .../hbase/regionserver/TestMetricsRegionServer.java | 10 ++++++---- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 3bd787d10074..86b97a2afb9e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -113,9 +113,6 @@ public void updatePutBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updatePutBatch(tn, t); } - if (t > slowMetricTime) { - serverSource.incrSlowPut(); - } serverSource.updatePutBatch(t); } @@ -123,6 +120,9 @@ public void updatePut(TableName tn, long t) { if (tableMetrics != null && tn 
!= null) { tableMetrics.updatePut(tn, t); } + if (t > slowMetricTime) { + serverSource.incrSlowPut(); + } serverSource.updatePut(t); userAggregate.updatePut(t); } @@ -131,6 +131,9 @@ public void updateDelete(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateDelete(tn, t); } + if (t > slowMetricTime) { + serverSource.incrSlowDelete(); + } serverSource.updateDelete(t); userAggregate.updateDelete(t); } @@ -139,9 +142,6 @@ public void updateDeleteBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateDeleteBatch(tn, t); } - if (t > slowMetricTime) { - serverSource.incrSlowDelete(); - } serverSource.updateDeleteBatch(t); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java index e56eb0f20aaa..777ba5e8feb2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -151,6 +151,8 @@ public void testSlowCount() { for (int i=0; i < 17; i ++) { rsm.updatePut(null, 17); rsm.updateDelete(null, 17); + rsm.updatePut(null, 1006); + rsm.updateDelete(null, 1003); rsm.updateCheckAndDelete(null, 17); rsm.updateCheckAndPut(null, 17); rsm.updateCheckAndMutate(null, 17); @@ -161,17 +163,17 @@ public void testSlowCount() { HELPER.assertCounter("getNumOps", 28, serverSource); HELPER.assertCounter("incrementNumOps", 30, serverSource); HELPER.assertCounter("putBatchNumOps", 32, serverSource); - HELPER.assertCounter("putNumOps", 17, serverSource); - HELPER.assertCounter("deleteNumOps", 17, serverSource); + HELPER.assertCounter("putNumOps", 34, serverSource); + HELPER.assertCounter("deleteNumOps", 34, serverSource); HELPER.assertCounter("checkAndDeleteNumOps", 17, serverSource); HELPER.assertCounter("checkAndPutNumOps", 17, serverSource); HELPER.assertCounter("checkAndMutateNumOps", 17, serverSource); HELPER.assertCounter("slowAppendCount", 12, serverSource); - HELPER.assertCounter("slowDeleteCount", 13, serverSource); + HELPER.assertCounter("slowDeleteCount", 17, serverSource); HELPER.assertCounter("slowGetCount", 14, serverSource); HELPER.assertCounter("slowIncrementCount", 15, serverSource); - HELPER.assertCounter("slowPutCount", 16, serverSource); + HELPER.assertCounter("slowPutCount", 17, serverSource); } @Test From a348204d4a77c0e15ddfd459cecfc4d3427d0a63 Mon Sep 17 00:00:00 2001 From: Bo Cui Date: Fri, 8 Jan 2021 21:57:23 +0800 Subject: [PATCH 314/769] HBASE-25483 set the loadMeta log level to debug (#2859) Signed-off-by: Pankaj Kumar --- .../hadoop/hbase/master/assignment/RegionStateStore.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 4da9493386ae..5036711507f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -164,8 +164,7 @@ private void visitMetaEntry(final RegionStateVisitor visitor, final Result resul ServerName regionLocation = MetaTableAccessor.getTargetServerName(result, replicaId); final long openSeqNum = hrl.getSeqNum(); - // TODO: move under trace, now is visible for 
debugging - LOG.info( + LOG.debug( "Load hbase:meta entry region={}, regionState={}, lastHost={}, " + "regionLocation={}, openSeqNum={}", regionInfo.getEncodedName(), state, lastHost, regionLocation, openSeqNum); From 5c233e9785c26f40deea32b772eaebe59401dbc8 Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Sat, 9 Jan 2021 00:00:15 +0800 Subject: [PATCH 315/769] HBASE-25459 WAL can't be cleaned in some scenes (#2848) Signed-off-by: Wellington Chevreuil --- .../regionserver/wal/SequenceIdAccounting.java | 6 +++++- .../wal/TestSequenceIdAccounting.java | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 5c6fcd2d1aa6..6be95391819b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -250,7 +250,11 @@ ConcurrentMap getOrCreateLowestSequenceIds(byte[] enco */ private static long getLowestSequenceId(Map sequenceids) { long lowest = HConstants.NO_SEQNUM; - for (Long sid: sequenceids.values()) { + for (Map.Entry entry : sequenceids.entrySet()){ + if (entry.getKey().toString().equals("METAFAMILY")){ + continue; + } + Long sid = entry.getValue(); if (lowest == HConstants.NO_SEQNUM || sid.longValue() < lowest) { lowest = sid.longValue(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java index 098dc86461b6..8eb99b3a4772 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java @@ -44,10 +44,14 @@ public class TestSequenceIdAccounting { private static final byte [] ENCODED_REGION_NAME = Bytes.toBytes("r"); private static final byte [] FAMILY_NAME = Bytes.toBytes("cf"); + private static final byte [] META_FAMILY = Bytes.toBytes("METAFAMILY"); private static final Set FAMILIES; + private static final Set META_FAMILY_SET; static { FAMILIES = new HashSet<>(); FAMILIES.add(FAMILY_NAME); + META_FAMILY_SET = new HashSet<>(); + META_FAMILY_SET.add(META_FAMILY); } @Test @@ -123,6 +127,20 @@ public void testAreAllLower() { sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); assertTrue(sida.areAllLower(m, null)); + m.put(ENCODED_REGION_NAME, sequenceid); + assertFalse(sida.areAllLower(m, null)); + + // Test the METAFAMILY is filtered in SequenceIdAccounting.lowestUnflushedSequenceIds + SequenceIdAccounting meta_sida = new SequenceIdAccounting(); + Map meta_m = new HashMap<>(); + meta_sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); + meta_m.put(ENCODED_REGION_NAME, sequenceid); + meta_sida.update(ENCODED_REGION_NAME, META_FAMILY_SET, ++sequenceid, true); + meta_sida.update(ENCODED_REGION_NAME, META_FAMILY_SET, ++sequenceid, true); + meta_sida.update(ENCODED_REGION_NAME, META_FAMILY_SET, ++sequenceid, true); + assertTrue(meta_sida.areAllLower(meta_m, null)); + meta_m.put(ENCODED_REGION_NAME, sequenceid); + assertTrue(meta_sida.areAllLower(meta_m, null)); } @Test From 8f55a2bb51111536dff89b381141c8f79f2472d6 Mon Sep 17 
00:00:00 2001 From: Michael Stack Date: Fri, 8 Jan 2021 12:38:45 -0800 Subject: [PATCH 316/769] HBASE-25487 [create-release] changes.md update broken (#2864) Signed-off-by: Huaxiang Sun --- dev-support/create-release/release-util.sh | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 5f7224fded6b..48e240f3402c 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -16,6 +16,17 @@ # See the License for the specific language governing permissions and # limitations under the License. # + +# Source this file if you want to use any of its utility (also useful for +# testing the below functions). Do "$ . ./release-util.sh" and then +# you can do stuff like call the CHANGES updating function +# update_releasenotes: +# +# $ update_releasenotes ~/checkouts/hbase.apache.git 2.3.4 +# +# Just make sure any environment variables needed are predefined +# in your context. +# DRY_RUN=${DRY_RUN:-1} #default to dry run DEBUG=${DEBUG:-0} GPG=${GPG:-gpg} @@ -26,8 +37,6 @@ fi # Maven Profiles for publishing snapshots and release to Maven Central and Dist PUBLISH_PROFILES=("-P" "apache-release,release") -set -e - function error { log "Error: $*" >&2 exit 1 @@ -478,6 +487,7 @@ function generate_api_report { } # Look up the Jira name associated with project. +# Returns result on stdout. # Currently all the 'hbase-*' projects share the same HBASE jira name. This works because, # by convention, the HBASE jira "Fix Version" field values have the sub-project name pre-pended, # as in "hbase-operator-tools-1.0.0". @@ -492,7 +502,7 @@ function get_jira_name { if [[ -z "$jira_name" ]]; then error "Sorry, can't determine the Jira name for project $project" fi - log "$jira_name" + echo "$jira_name" } # Update the CHANGES.md From 49aba571813f649a2ff4482a2209ee9910cc72c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=94=B3=E8=83=9C=E5=88=A9?= <48829688+shenshengli@users.noreply.github.com> Date: Fri, 8 Jan 2021 14:10:30 -0500 Subject: [PATCH 317/769] HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml Signed-off-by: Josh Elser --- .../src/main/resources/hbase-default.xml | 4 +- .../hadoop/hbase/TestHBaseConfiguration.java | 17 ++++++++ .../src/test/resources/hdfs-default.xml | 42 +++++++++++++++++++ .../src/test/resources/hdfs-scr-enabled.xml | 42 +++++++++++++++++++ 4 files changed, 103 insertions(+), 2 deletions(-) create mode 100644 hbase-common/src/test/resources/hdfs-default.xml create mode 100644 hbase-common/src/test/resources/hdfs-scr-enabled.xml diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 9092dd147198..20f3881edb2c 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the important. dfs.client.read.shortcircuit - false + If set to true, this configuration parameter enables short-circuit local reads. @@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the important.
dfs.domain.socket.path - none + This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients, if diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 6a0b4283ed03..1144f1daf351 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -115,6 +115,23 @@ public void testSecurityConfCaseInsensitive() { conf.set("hbase.security.authentication", "KERBeros"); Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); } + + @Test + public void testGetConfigOfShortcircuitRead() throws Exception { + Configuration conf = HBaseConfiguration.create(); + Configuration.addDefaultResource("hdfs-default.xml"); + assertEquals("hdfs-default.xml", + conf.getPropertySources("dfs.client.read.shortcircuit")[0]); + assertEquals("false", conf.get("dfs.client.read.shortcircuit")); + assertNull(conf.get("dfs.domain.socket.path")); + Configuration.addDefaultResource("hdfs-scr-enabled.xml"); + assertEquals("hdfs-scr-enabled.xml", + conf.getPropertySources("dfs.client.read.shortcircuit")[0]); + assertEquals("hdfs-scr-enabled.xml", + conf.getPropertySources("dfs.domain.socket.path")[0]); + assertEquals("true", conf.get("dfs.client.read.shortcircuit")); + assertEquals("/var/lib/hadoop-hdfs/dn_socket", conf.get("dfs.domain.socket.path")); + } private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = diff --git a/hbase-common/src/test/resources/hdfs-default.xml b/hbase-common/src/test/resources/hdfs-default.xml new file mode 100644 index 000000000000..fdb3c36edc87 --- /dev/null +++ b/hbase-common/src/test/resources/hdfs-default.xml @@ -0,0 +1,42 @@ + + + + + + + dfs.client.read.shortcircuit + false + + If set to true, this configuration parameter enables short-circuit local + reads. + + + + dfs.domain.socket.path + + + Optional. This is a path to a UNIX domain socket that will be used for + communication between the DataNode and local HDFS clients. + If the string "_PORT" is present in this path, it will be replaced by the + TCP port of the DataNode. + + + diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml new file mode 100644 index 000000000000..8594494782c5 --- /dev/null +++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml @@ -0,0 +1,42 @@ + + + + + + + dfs.client.read.shortcircuit + true + + If set to true, this configuration parameter enables short-circuit local + reads. + + + + dfs.domain.socket.path + /var/lib/hadoop-hdfs/dn_socket + + Optional. This is a path to a UNIX domain socket that will be used for + communication between the DataNode and local HDFS clients. + If the string "_PORT" is present in this path, it will be replaced by the + TCP port of the DataNode. + + + From 84c4033b1a2518df7421bad0afc1370cd8d25613 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 8 Jan 2021 14:43:56 -0800 Subject: [PATCH 318/769] HBASE-25489 improve performance of --parse-release-tags (#2867) Profiler shows a lot of time spent in the UPDATE SQL statement. Remove the tight loop and let SQL do a bulk-update instead. 
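For reference, the same bulk-update pattern sketched in Java/JDBC (the patch itself is Python/sqlite3; the git_commits table name and column names mirror the change above, but the Connection handling here is an assumption for illustration, and a non-empty SHA list is assumed):

  import java.sql.Connection;
  import java.sql.PreparedStatement;
  import java.sql.SQLException;
  import java.util.List;
  import java.util.stream.Collectors;

  public class BulkTagUpdate {
    /**
     * One UPDATE with an IN (...) placeholder list instead of one
     * UPDATE per SHA. Assumes gitShas is non-empty; an empty list
     * would produce invalid SQL ("IN ()").
     */
    public static void applyGitTag(Connection conn, String branch, String gitTag,
        List<String> gitShas) throws SQLException {
      // Build "?,?,...,?" with one placeholder per SHA
      String placeholders =
          gitShas.stream().map(s -> "?").collect(Collectors.joining(","));
      String sql = "UPDATE git_commits SET git_tag = ? WHERE branch = ?"
          + " AND git_sha IN (" + placeholders + ")";
      try (PreparedStatement ps = conn.prepareStatement(sql)) {
        ps.setString(1, gitTag);
        ps.setString(2, branch);
        for (int i = 0; i < gitShas.size(); i++) {
          // SHA parameters start at index 3, after git_tag and branch
          ps.setString(3 + i, gitShas.get(i));
        }
        ps.executeUpdate();
      }
    }
  }

The win is the same in either language: one statement parse and one round trip for N rows instead of N of each.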
Signed-off-by: Huaxiang Sun Signed-off-by: Michael Stack --- .../git_jira_release_audit.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/dev-support/git-jira-release-audit/git_jira_release_audit.py b/dev-support/git-jira-release-audit/git_jira_release_audit.py index 358dfd533502..f8066c44e8f5 100644 --- a/dev-support/git-jira-release-audit/git_jira_release_audit.py +++ b/dev-support/git-jira-release-audit/git_jira_release_audit.py @@ -122,16 +122,21 @@ def flush_commits(self): """Commit any pending changes to the database.""" self.conn.commit() - def apply_git_tag(self, branch, git_sha, git_tag): + def apply_git_tag(self, branch, git_tag, git_shas): """Annotate a commit in the commits database as being a part of the specified release. Args: branch (str): The name of the git branch from which the commit originates. - git_sha (str): The commit's SHA. git_tag (str): The first release tag following the commit. + git_shas: The commits' SHAs. """ - self.conn.execute("UPDATE git_commits SET git_tag = ? WHERE branch = ? AND git_sha = ?", - (git_tag, branch, git_sha)) + self.conn.execute( + ( + f"UPDATE git_commits SET git_tag = ?" + f" WHERE branch = ?" + f" AND git_sha in ({','.join('?' for _ in git_shas)})" + ), + [git_tag, branch] + git_shas) def apply_fix_version(self, jira_id, fix_version): """Annotate a Jira issue in the jira database as being part of the specified release @@ -327,12 +332,7 @@ def _extract_release_tag(self, commit): return None def _set_release_tag(self, branch, tag, shas): - cnt = 0 - for sha in shas: - self._db.apply_git_tag(branch, sha, tag) - cnt += 1 - if cnt % 50 == 0: - self._db.flush_commits() + self._db.apply_git_tag(branch, tag, shas) self._db.flush_commits() def _resolve_ambiguity(self, commit): From 54eae0fc5c890915a8c59a8d00375014bd9d7c5f Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Fri, 8 Jan 2021 14:45:45 -0800 Subject: [PATCH 319/769] HBASE-25473 [create-release] checkcompatibility.py failing with "KeyError: 'binary'" (#2862) Exclude hbase-shaded-testing-util*.jar from checkcompatibility; this jar cannot be unzipped on a case-insensitive filesystem. Added some debugging to checkcompatibility to help diagnose cryptic failures. Signed-off-by: Nick Dimiduk --- dev-support/checkcompatibility.py | 10 +++++++++- dev-support/create-release/release-util.sh | 10 ++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/dev-support/checkcompatibility.py b/dev-support/checkcompatibility.py index b764aaaec17d..d39599aa3ea1 100755 --- a/dev-support/checkcompatibility.py +++ b/dev-support/checkcompatibility.py @@ -229,7 +229,7 @@ def compare_results(tool_results, known_issues, compare_warnings): observed_count=tool_results[check][issue_type]) for check, known_issue_counts in known_issues.items() for issue_type, known_count in known_issue_counts.items() - if tool_results[check][issue_type] > known_count] + if compare_tool_results_count(tool_results, check, issue_type, known_count)] if not compare_warnings: unexpected_issues = [tup for tup in unexpected_issues @@ -241,6 +241,14 @@ def compare_results(tool_results, known_issues, compare_warnings): return bool(unexpected_issues) +def compare_tool_results_count(tool_results, check, issue_type, known_count): + """ Check problem counts are no more than the known count. + (This function exists just so we can add logging; previously it was an inlined
+ (This function exists just so can add in logging; previous was inlined + one-liner but this made it hard debugging) + """ + # logging.info("known_count=%s, check key=%s, tool_results=%s, issue_type=%s", + # str(known_count), str(check), str(tool_results), str(issue_type)) + return tool_results[check][issue_type] > known_count def process_java_acc_output(output): """ Process the output string to find the problems and warnings in both the diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 48e240f3402c..d907253dffe6 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -478,8 +478,18 @@ function generate_api_report { local timing_token timing_token="$(start_step)" # Generate api report. + # Filter out some jar types. Filters are tricky. Python regex on + # file basename. Exclude the saved-aside original jars... they are + # not included in resulting artifact. Also, do not include the + # hbase-shaded-testing-util.* jars. This jar is unzip'able on mac + # os x as is because has it a META_INF/LICENSE file and then a + # META_INF/license directory for the included jar's licenses; + # it fails to unjar on mac os x which this tool does making its checks + # (Its exclusion should be fine; it is just an aggregate of other jars). "${project}"/dev-support/checkcompatibility.py --annotation \ org.apache.yetus.audience.InterfaceAudience.Public \ + -e "original-hbase.*.jar" \ + -e "hbase-shaded-testing-util.*.jar" \ "$previous_tag" "$release_tag" previous_version="$(echo "${previous_tag}" | sed -e 's/rel\///')" cp "${project}/target/compat-check/report.html" "./api_compare_${previous_version}_to_${release_tag}.html" From 4925a6422ba5e2a40ee097a26bc00447e7766a1f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 9 Jan 2021 08:50:11 +0800 Subject: [PATCH 320/769] HBASE-25333 Add maven enforcer rule to ban VisibleForTesting imports (#2854) Signed-off-by: Peter Somogyi --- pom.xml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pom.xml b/pom.xml index 749ab83842bb..e4505d6afbcf 100755 --- a/pom.xml +++ b/pom.xml @@ -1341,6 +1341,19 @@ org.glassfish.jersey.** + + true + 512 + + You should never use this style of annotations(i.e, 'this is for test only') + in IA.Public or IA.LimitedPrivate classes. Use IA.Private to tell users this is + not for public use. + For IA.Private classes, use RestrictedApi annotation in error prone instead. + + + org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting + + From 686b72c44e2b72cb8f8817b0bd24d12f25820a8f Mon Sep 17 00:00:00 2001 From: "Tak Lon (Stephen) Wu" Date: Fri, 8 Jan 2021 20:16:45 -0800 Subject: [PATCH 321/769] HBASE-25249 Adding StoreContext (#2800) Adding StoreContext which contains the metadata about the HStore. This metadata can be used across the HFileWriter/Readers and other HStore consumers without the need of passing around the complete store and exposing its internals. 
Co-authored-by: Abhishek Khanna Signed-off-by: stack Signed-off-by: Zach York --- .../hbase/mapreduce/HFileOutputFormat2.java | 5 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 6 +- .../hadoop/hbase/regionserver/HMobStore.java | 32 ++- .../hadoop/hbase/regionserver/HStore.java | 258 +++++++++--------- .../hbase/regionserver/StoreContext.java | 194 +++++++++++++ .../hadoop/hbase/regionserver/StoreUtils.java | 25 ++ .../hadoop/hbase/tool/BulkLoadHFilesTool.java | 6 +- .../wal/BoundedRecoveredHFilesOutputSink.java | 6 +- .../apache/hadoop/hbase/io/TestHeapSize.java | 3 +- .../hbase/regionserver/TestHRegion.java | 2 +- .../TestSecureBulkLoadManager.java | 4 +- 11 files changed, 376 insertions(+), 165 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 75b5246d2c88..ee6d5331f3f6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -369,8 +370,8 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding; encoding = encoding == null ? 
DataBlockEncoding.NONE : encoding; HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression) - .withDataBlockEncoding(encoding).withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) .withColumnFamily(family).withTableName(tableName); if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index acc8f74a501b..2ae29385eb42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -56,9 +56,9 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -540,8 +540,8 @@ public static StoreFileWriter createWriter(Configuration conf, FileSystem fs, Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext, boolean isCompaction) throws IOException { return createWriter(conf, fs, family, new Path(basePath, mobFileName.getFileName()), - maxKeyCount, compression, cacheConfig, cryptoContext, HStore.getChecksumType(conf), - HStore.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction); + maxKeyCount, compression, cacheConfig, cryptoContext, StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 5960b8030900..7ce7f0310c7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -93,7 +93,6 @@ public class HMobStore extends HStore { private AtomicLong mobFlushedCellsSize = new AtomicLong(); private AtomicLong mobScanCellsCount = new AtomicLong(); private AtomicLong mobScanCellsSize = new AtomicLong(); - private ColumnFamilyDescriptor family; private Map> map = new ConcurrentHashMap<>(); private final IdLock keyLock = new IdLock(); // When we add a MOB reference cell to the HFile, we will add 2 tags along with it @@ -107,16 +106,15 @@ public class HMobStore extends HStore { public HMobStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { super(region, family, confParam, warmup); - this.family = family; this.mobFileCache = region.getMobFileCache(); this.homePath = MobUtils.getMobHome(conf); this.mobFamilyPath = MobUtils.getMobFamilyPath(conf, this.getTableName(), - family.getNameAsString()); + getColumnFamilyName()); List locations = new ArrayList<>(2); 
locations.add(mobFamilyPath); TableName tn = region.getTableDescriptor().getTableName(); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn) - .getEncodedName(), family.getNameAsString())); + .getEncodedName(), getColumnFamilyName())); map.put(tn, locations); List tags = new ArrayList<>(2); tags.add(MobConstants.MOB_REF_TAG); @@ -209,7 +207,7 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey Compression.Algorithm compression, byte[] startKey, boolean isCompaction) throws IOException { MobFileName mobFileName = MobFileName.create(startKey, date, UUID.randomUUID() - .toString().replaceAll("-", ""), region.getRegionInfo().getEncodedName()); + .toString().replaceAll("-", ""), getHRegion().getRegionInfo().getEncodedName()); return createWriterInTmp(mobFileName, basePath, maxKeyCount, compression, isCompaction); } @@ -226,9 +224,11 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath, long maxKeyCount, Compression.Algorithm compression, boolean isCompaction) throws IOException { - return MobUtils.createWriter(conf, region.getFilesystem(), family, - new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, cacheConf, - cryptoContext, checksumType, bytesPerChecksum, blocksize, BloomType.NONE, isCompaction); + return MobUtils.createWriter(conf, getFileSystem(), getColumnFamilyDescriptor(), + new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, getCacheConfig(), + getStoreContext().getEncryptionContext(), StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), getStoreContext().getBlockSize(), BloomType.NONE, + isCompaction); } /** @@ -245,10 +245,10 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio validateMobFile(sourceFile); LOG.info(" FLUSH Renaming flushed file from {} to {}", sourceFile, dstPath); Path parent = dstPath.getParent(); - if (!region.getFilesystem().exists(parent)) { - region.getFilesystem().mkdirs(parent); + if (!getFileSystem().exists(parent)) { + getFileSystem().mkdirs(parent); } - if (!region.getFilesystem().rename(sourceFile, dstPath)) { + if (!getFileSystem().rename(sourceFile, dstPath)) { throw new IOException("Failed rename of " + sourceFile + " to " + dstPath); } } @@ -261,7 +261,7 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio private void validateMobFile(Path path) throws IOException { HStoreFile storeFile = null; try { - storeFile = new HStoreFile(region.getFilesystem(), path, conf, this.cacheConf, + storeFile = new HStoreFile(getFileSystem(), path, conf, getCacheConfig(), BloomType.NONE, isPrimaryReplicaStore()); storeFile.initReader(); } catch (IOException e) { @@ -352,9 +352,11 @@ public List getLocations(TableName tableName) throws IOException { locations = map.get(tableName); if (locations == null) { locations = new ArrayList<>(2); - locations.add(MobUtils.getMobFamilyPath(conf, tableName, family.getNameAsString())); + locations.add(MobUtils.getMobFamilyPath(conf, tableName, getColumnFamilyDescriptor() + .getNameAsString())); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tableName, - MobUtils.getMobRegionInfo(tableName).getEncodedName(), family.getNameAsString())); + MobUtils.getMobRegionInfo(tableName).getEncodedName(), getColumnFamilyDescriptor() + .getNameAsString())); map.put(tableName, locations); } } finally { @@ -388,7 +390,7 @@ private MobCell 
readCell(List locations, String fileName, Cell search, MobFile file = null; Path path = new Path(location, fileName); try { - file = mobFileCache.openFile(fs, path, cacheConf); + file = mobFileCache.openFile(fs, path, getCacheConfig()); return readPt != -1 ? file.readCell(search, cacheMobBlocks, readPt) : file.readCell(search, cacheMobBlocks); } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 3a71c230bebe..99880efece73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -97,7 +97,6 @@ import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -157,11 +156,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected final MemStore memstore; // This stores directory in the filesystem. - protected final HRegion region; - private final ColumnFamilyDescriptor family; - private final HRegionFileSystem fs; + private final HRegion region; protected Configuration conf; - protected CacheConfig cacheConf; private long lastCompactSize = 0; volatile boolean forceMajor = false; private AtomicLong storeSize = new AtomicLong(); @@ -215,16 +211,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private final Set changedReaderObservers = Collections.newSetFromMap(new ConcurrentHashMap()); - protected final int blocksize; private HFileDataBlockEncoder dataBlockEncoder; - /** Checksum configuration */ - protected ChecksumType checksumType; - protected int bytesPerChecksum; - - // Comparing KeyValues - protected final CellComparator comparator; - final StoreEngine storeEngine; private static final AtomicBoolean offPeakCompactionTracker = new AtomicBoolean(); @@ -236,7 +224,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private long blockingFileCount; private int compactionCheckMultiplier; - protected Encryption.Context cryptoContext = Encryption.Context.NONE; private AtomicLong flushedCellsCount = new AtomicLong(); private AtomicLong compactedCellsCount = new AtomicLong(); @@ -246,6 +233,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private AtomicLong compactedCellsSize = new AtomicLong(); private AtomicLong majorCompactedCellsSize = new AtomicLong(); + private final StoreContext storeContext; + /** * Constructor * @param family HColumnDescriptor for this column @@ -254,12 +243,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected HStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { - this.fs = region.getRegionFileSystem(); - - // Assemble the store's home directory and Ensure it exists. - fs.createStoreDir(family.getNameAsString()); - this.region = region; - this.family = family; // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor // CompoundConfiguration will look for keys in reverse order of addition, so we'd // add global config first, then table and cf overrides, then cf metadata. 
@@ -268,18 +251,22 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, .addBytesMap(region.getTableDescriptor().getValues()) .addStringMap(family.getConfiguration()) .addBytesMap(family.getValues()); - this.blocksize = family.getBlocksize(); + + this.region = region; + this.storeContext = initializeStoreContext(family); + + // Assemble the store's home directory and Ensure it exists. + region.getRegionFileSystem().createStoreDir(family.getNameAsString()); // set block storage policy for store directory String policyName = family.getStoragePolicy(); if (null == policyName) { policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY); } - this.fs.setStoragePolicy(family.getNameAsString(), policyName.trim()); + region.getRegionFileSystem().setStoragePolicy(family.getNameAsString(), policyName.trim()); this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding()); - this.comparator = region.getCellComparator(); // used by ScanQueryMatcher long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); @@ -288,14 +275,11 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, long ttl = determineTTLFromFamily(family); // Why not just pass a HColumnDescriptor in here altogether? Even if have // to clone it? - scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); + scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, region.getCellComparator()); this.memstore = getMemstore(); this.offPeakHours = OffPeakHours.getInstance(conf); - // Setting up cache configuration for this family - createCacheConf(family); - this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false); this.blockingFileCount = @@ -308,7 +292,7 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER; } - this.storeEngine = createStoreEngine(this, this.conf, this.comparator); + this.storeEngine = createStoreEngine(this, this.conf, region.getCellComparator()); List hStoreFiles = loadStoreFiles(warmup); // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and @@ -318,10 +302,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.totalUncompressedBytes.addAndGet(getTotalUncompressedBytes(hStoreFiles)); this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles); - // Initialize checksum type from name. The names are CRC32, CRC32C, etc. 
- this.checksumType = getChecksumType(conf); - // Initialize bytes per checksum - this.bytesPerChecksum = getBytesPerChecksum(conf); flushRetriesNumber = conf.getInt( "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER); pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE); @@ -330,7 +310,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, "hbase.hstore.flush.retries.number must be > 0, not " + flushRetriesNumber); } - cryptoContext = EncryptionUtil.createEncryptionContext(conf, family); int confPrintThreshold = this.conf.getInt("hbase.region.store.parallel.put.print.threshold", 50); @@ -347,6 +326,32 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, cacheOnWriteLogged = false; } + private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException { + return new StoreContext.Builder() + .withBlockSize(family.getBlocksize()) + .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family)) + .withBloomType(family.getBloomFilterType()) + .withCacheConfig(createCacheConf(family)) + .withCellComparator(region.getCellComparator()) + .withColumnFamilyDescriptor(family) + .withCompactedFilesSupplier(this::getCompactedFiles) + .withRegionFileSystem(region.getRegionFileSystem()) + .withFavoredNodesSupplier(this::getFavoredNodes) + .withFamilyStoreDirectoryPath(region.getRegionFileSystem() + .getStoreDir(family.getNameAsString())) + .withRegionCoprocessorHost(region.getCoprocessorHost()) + .build(); + } + + private InetSocketAddress[] getFavoredNodes() { + InetSocketAddress[] favoredNodes = null; + if (region.getRegionServerServices() != null) { + favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion( + region.getRegionInfo().getEncodedName()); + } + return favoredNodes; + } + /** * @return MemStore Instance to use in this store. */ @@ -358,7 +363,7 @@ private MemStore getMemstore() { inMemoryCompaction = MemoryCompactionPolicy.valueOf( conf.get("hbase.systemtables.compacting.memstore.type", "NONE")); } else { - inMemoryCompaction = family.getInMemoryCompaction(); + inMemoryCompaction = getColumnFamilyDescriptor().getInMemoryCompaction(); } if (inMemoryCompaction == null) { inMemoryCompaction = @@ -368,13 +373,13 @@ private MemStore getMemstore() { switch (inMemoryCompaction) { case NONE: ms = ReflectionUtils.newInstance(DefaultMemStore.class, - new Object[] { conf, this.comparator, + new Object[] { conf, getComparator(), this.getHRegion().getRegionServicesForStores()}); break; default: Class clz = conf.getClass(MEMSTORE_CLASS_NAME, CompactingMemStore.class, CompactingMemStore.class); - ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this, + ms = ReflectionUtils.newInstance(clz, new Object[]{conf, getComparator(), this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction}); } return ms; @@ -384,10 +389,12 @@ private MemStore getMemstore() { * Creates the cache config. * @param family The current column family. 
*/ - protected void createCacheConf(final ColumnFamilyDescriptor family) { - this.cacheConf = new CacheConfig(conf, family, region.getBlockCache(), + protected CacheConfig createCacheConf(final ColumnFamilyDescriptor family) { + CacheConfig cacheConf = new CacheConfig(conf, family, region.getBlockCache(), region.getRegionServicesForStores().getByteBuffAllocator()); - LOG.info("Created cacheConfig: " + this.getCacheConfig() + " for " + this); + LOG.info("Created cacheConfig: {}, for column family {} of region {} ", cacheConf, + family.getNameAsString(), region.getRegionInfo().getEncodedName()); + return cacheConf; } /** @@ -400,7 +407,7 @@ protected void createCacheConf(final ColumnFamilyDescriptor family) { */ protected StoreEngine createStoreEngine(HStore store, Configuration conf, CellComparator kvComparator) throws IOException { - return StoreEngine.create(store, conf, comparator); + return StoreEngine.create(store, conf, kvComparator); } /** @@ -421,9 +428,13 @@ public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { return ttl; } + StoreContext getStoreContext() { + return storeContext; + } + @Override public String getColumnFamilyName() { - return this.family.getNameAsString(); + return this.storeContext.getFamily().getNameAsString(); } @Override @@ -433,11 +444,11 @@ public TableName getTableName() { @Override public FileSystem getFileSystem() { - return this.fs.getFileSystem(); + return storeContext.getRegionFileSystem().getFileSystem(); } public HRegionFileSystem getRegionFileSystem() { - return this.fs; + return storeContext.getRegionFileSystem(); } /* Implementation of StoreConfigInformation */ @@ -474,33 +485,10 @@ public long getBlockingFileCount() { } /* End implementation of StoreConfigInformation */ - /** - * Returns the configured bytesPerChecksum value. - * @param conf The configuration - * @return The bytesPerChecksum that is set in the configuration - */ - public static int getBytesPerChecksum(Configuration conf) { - return conf.getInt(HConstants.BYTES_PER_CHECKSUM, - HFile.DEFAULT_BYTES_PER_CHECKSUM); - } - - /** - * Returns the configured checksum algorithm. - * @param conf The configuration - * @return The checksum algorithm that is set in the configuration - */ - public static ChecksumType getChecksumType(Configuration conf) { - String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME); - if (checksumName == null) { - return ChecksumType.getDefaultChecksumType(); - } else { - return ChecksumType.nameToType(checksumName); - } - } @Override public ColumnFamilyDescriptor getColumnFamilyDescriptor() { - return this.family; + return this.storeContext.getFamily(); } @Override @@ -559,7 +547,7 @@ void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) { * from the given directory. */ private List loadStoreFiles(boolean warmup) throws IOException { - Collection files = fs.getStoreFiles(getColumnFamilyName()); + Collection files = getRegionFileSystem().getStoreFiles(getColumnFamilyName()); return openStoreFiles(files, warmup); } @@ -610,7 +598,7 @@ private List openStoreFiles(Collection files, boolean if (ioe != null) { // close StoreFile readers boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? 
getCacheConfig().shouldEvictOnClose(): true; for (HStoreFile file : results) { try { if (file != null) { @@ -638,7 +626,8 @@ private List openStoreFiles(Collection files, boolean results.removeAll(filesToRemove); if (!filesToRemove.isEmpty() && this.isPrimaryReplicaStore()) { LOG.debug("Moving the files {} to archive", filesToRemove); - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); + getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), + filesToRemove); } } @@ -647,7 +636,7 @@ private List openStoreFiles(Collection files, boolean @Override public void refreshStoreFiles() throws IOException { - Collection newFiles = fs.getStoreFiles(getColumnFamilyName()); + Collection newFiles = getRegionFileSystem().getStoreFiles(getColumnFamilyName()); refreshStoreFilesInternal(newFiles); } @@ -658,7 +647,7 @@ public void refreshStoreFiles() throws IOException { public void refreshStoreFiles(Collection newFiles) throws IOException { List storeFiles = new ArrayList<>(newFiles.size()); for (String file : newFiles) { - storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file)); + storeFiles.add(getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file)); } refreshStoreFilesInternal(storeFiles); } @@ -735,7 +724,8 @@ protected HStoreFile createStoreFileAndReader(final Path p) throws IOException { private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException { info.setRegionCoprocessorHost(this.region.getCoprocessorHost()); - HStoreFile storeFile = new HStoreFile(info, this.family.getBloomFilterType(), this.cacheConf); + HStoreFile storeFile = new HStoreFile(info, getColumnFamilyDescriptor().getBloomFilterType(), + getCacheConfig()); storeFile.initReader(); return storeFile; } @@ -818,7 +808,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { LOG.info("Validating hfile at " + srcPath + " for inclusion in " + this); FileSystem srcFs = srcPath.getFileSystem(conf); srcFs.access(srcPath, FsAction.READ_WRITE); - reader = HFile.createReader(srcFs, srcPath, cacheConf, isPrimaryReplicaStore(), conf); + reader = HFile.createReader(srcFs, srcPath, getCacheConfig(), isPrimaryReplicaStore(), conf); Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); @@ -855,7 +845,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { do { Cell cell = scanner.getCell(); if (prevCell != null) { - if (comparator.compareRows(prevCell, cell) > 0) { + if (getComparator().compareRows(prevCell, cell) > 0) { throw new InvalidHFileException("Previous row is greater than" + " current row: path=" + srcPath + " previous=" + CellUtil.getCellKeyAsString(prevCell) + " current=" @@ -892,13 +882,13 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { */ public Pair preBulkLoadHFile(String srcPathStr, long seqNum) throws IOException { Path srcPath = new Path(srcPathStr); - return fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); + return getRegionFileSystem().bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); } public Path bulkLoadHFile(byte[] family, String srcPathStr, Path dstPath) throws IOException { Path srcPath = new Path(srcPathStr); try { - fs.commitStoreFile(srcPath, dstPath); + getRegionFileSystem().commitStoreFile(srcPath, dstPath); } finally { if (this.getCoprocessorHost() != null) { this.getCoprocessorHost().postCommitStoreFile(family, srcPath, dstPath); @@ -964,8 +954,8 @@ 
public ImmutableCollection close() throws IOException { storeEngine.getStoreFileManager().clearCompactedFiles(); // clear the compacted files if (CollectionUtils.isNotEmpty(compactedfiles)) { - removeCompactedfiles(compactedfiles, cacheConf != null ? - cacheConf.shouldEvictOnClose() : true); + removeCompactedfiles(compactedfiles, getCacheConfig() != null ? + getCacheConfig().shouldEvictOnClose() : true); } if (!result.isEmpty()) { // initialize the thread pool for closing store files in parallel. @@ -981,7 +971,7 @@ public ImmutableCollection close() throws IOException { @Override public Void call() throws IOException { boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true; f.closeStoreFile(evictOnClose); return null; } @@ -1092,7 +1082,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { FileSystem srcFs = path.getFileSystem(conf); srcFs.access(path, FsAction.READ_WRITE); try (HFile.Reader reader = - HFile.createReader(srcFs, path, cacheConf, isPrimaryReplicaStore(), conf)) { + HFile.createReader(srcFs, path, getCacheConfig(), isPrimaryReplicaStore(), conf)) { Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); Optional lk = reader.getLastKey(); @@ -1104,7 +1094,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { } } - Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path); + Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path); HStoreFile sf = createStoreFileAndReader(dstPath); StoreFileReader r = sf.getReader(); this.storeSize.addAndGet(r.length()); @@ -1129,7 +1119,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status) throws IOException { // Write-out finished successfully, move into the right spot - Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path); + Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path); status.setStatus("Flushing " + this + ": reopening flushed file"); HStoreFile sf = createStoreFileAndReader(dstPath); @@ -1167,12 +1157,13 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm boolean shouldDropBehind, long totalCompactedFilesSize, String fileStoragePolicy) throws IOException { // creating new cache config for each new writer + final CacheConfig cacheConf = getCacheConfig(); final CacheConfig writerCacheConf = new CacheConfig(cacheConf); if (isCompaction) { // Don't cache data on write on compactions, unless specifically configured to do so // Cache only when total file size remains lower than configured threshold final boolean cacheCompactedBlocksOnWrite = - cacheConf.shouldCacheCompactedBlocksOnWrite(); + getCacheConfig().shouldCacheCompactedBlocksOnWrite(); // if data blocks are to be cached on write // during compaction, we should forcefully // cache index and bloom blocks as well @@ -1206,53 +1197,48 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm } } } - InetSocketAddress[] favoredNodes = null; - if (region.getRegionServerServices() != null) { - favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion( - region.getRegionInfo().getEncodedName()); - } + Encryption.Context encryptionContext = storeContext.getEncryptionContext(); HFileContext hFileContext = 
createFileContext(compression, includeMVCCReadpoint, includesTag, - cryptoContext); - Path familyTempDir = new Path(fs.getTempDir(), family.getNameAsString()); - StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, - this.getFileSystem()) - .withOutputDir(familyTempDir) - .withBloomType(family.getBloomFilterType()) - .withMaxKeyCount(maxKeyCount) - .withFavoredNodes(favoredNodes) - .withFileContext(hFileContext) - .withShouldDropCacheBehind(shouldDropBehind) - .withCompactedFilesSupplier(this::getCompactedFiles) - .withFileStoragePolicy(fileStoragePolicy); + encryptionContext); + Path familyTempDir = new Path(getRegionFileSystem().getTempDir(), getColumnFamilyName()); + StoreFileWriter.Builder builder = + new StoreFileWriter.Builder(conf, writerCacheConf, getFileSystem()) + .withOutputDir(familyTempDir) + .withBloomType(storeContext.getBloomFilterType()) + .withMaxKeyCount(maxKeyCount) + .withFavoredNodes(storeContext.getFavoredNodes()) + .withFileContext(hFileContext) + .withShouldDropCacheBehind(shouldDropBehind) + .withCompactedFilesSupplier(storeContext.getCompactedFilesSupplier()) + .withFileStoragePolicy(fileStoragePolicy); return builder.build(); } private HFileContext createFileContext(Compression.Algorithm compression, - boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) { + boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) { if (compression == null) { compression = HFile.DEFAULT_COMPRESSION_ALGORITHM; } + ColumnFamilyDescriptor family = getColumnFamilyDescriptor(); HFileContext hFileContext = new HFileContextBuilder() - .withIncludesMvcc(includeMVCCReadpoint) - .withIncludesTags(includesTag) - .withCompression(compression) - .withCompressTags(family.isCompressTags()) - .withChecksumType(checksumType) - .withBytesPerCheckSum(bytesPerChecksum) - .withBlockSize(blocksize) - .withHBaseCheckSum(true) - .withDataBlockEncoding(family.getDataBlockEncoding()) - .withEncryptionContext(cryptoContext) - .withCreateTime(EnvironmentEdgeManager.currentTime()) - .withColumnFamily(family.getName()) - .withTableName(region.getTableDescriptor() - .getTableName().getName()) - .withCellComparator(this.comparator) - .build(); + .withIncludesMvcc(includeMVCCReadpoint) + .withIncludesTags(includesTag) + .withCompression(compression) + .withCompressTags(family.isCompressTags()) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) + .withBlockSize(family.getBlocksize()) + .withHBaseCheckSum(true) + .withDataBlockEncoding(family.getDataBlockEncoding()) + .withEncryptionContext(encryptionContext) + .withCreateTime(EnvironmentEdgeManager.currentTime()) + .withColumnFamily(getColumnFamilyDescriptor().getName()) + .withTableName(getTableName().getName()) + .withCellComparator(getComparator()) + .build(); return hFileContext; } - private long getTotalSize(Collection sfs) { return sfs.stream().mapToLong(sf -> sf.getReader().length()).sum(); } @@ -1529,7 +1515,7 @@ public List compact(CompactionContext compaction, // Ready to go. Have list of files to compact. 
LOG.info("Starting compaction of " + filesToCompact + - " into tmpdir=" + fs.getTempDir() + ", totalSize=" + + " into tmpdir=" + getRegionFileSystem().getTempDir() + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1)); return doCompaction(cr, filesToCompact, user, compactionStartTime, @@ -1579,7 +1565,7 @@ private void setStoragePolicyFromFileName(List newFiles) throws IOExceptio String prefix = HConstants.STORAGE_POLICY_PREFIX; for (Path newFile : newFiles) { if (newFile.getParent().getName().startsWith(prefix)) { - CommonFSUtils.setStoragePolicy(fs.getFileSystem(), newFile, + CommonFSUtils.setStoragePolicy(getRegionFileSystem().getFileSystem(), newFile, newFile.getParent().getName().substring(prefix.length())); } } @@ -1604,7 +1590,7 @@ private List moveCompactedFilesIntoPlace(CompactionRequestImpl cr, HStoreFile moveFileIntoPlace(Path newFile) throws IOException { validateStoreFile(newFile); // Move the file into the right spot - Path destPath = fs.commitStoreFile(getColumnFamilyName(), newFile); + Path destPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), newFile); return createStoreFileAndReader(destPath); } @@ -1624,8 +1610,8 @@ private void writeCompactionWalRecord(Collection filesCompacted, newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList()); RegionInfo info = this.region.getRegionInfo(); CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info, - family.getName(), inputPaths, outputPaths, - fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString())); + getColumnFamilyDescriptor().getName(), inputPaths, outputPaths, + getRegionFileSystem().getStoreDir(getColumnFamilyDescriptor().getNameAsString())); // Fix reaching into Region to get the maxWaitForSeqId. // Does this method belong in Region altogether given it is making so many references up there? // Could be Region#writeCompactionMarker(compactionDescriptor); @@ -1752,7 +1738,7 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick String familyName = this.getColumnFamilyName(); Set inputFiles = new HashSet<>(); for (String compactionInput : compactionInputs) { - Path inputPath = fs.getStoreFilePath(familyName, compactionInput); + Path inputPath = getRegionFileSystem().getStoreFilePath(familyName, compactionInput); inputFiles.add(inputPath.getName()); } @@ -1772,7 +1758,8 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick compactionOutputs.remove(sf.getPath().getName()); } for (String compactionOutput : compactionOutputs) { - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), compactionOutput); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); outputStoreFiles.add(storeFile); } @@ -2092,7 +2079,7 @@ int versionsToReturn(final int wantedVersions) { throw new IllegalArgumentException("Number of versions must be > 0"); } // Make sure we do not return more than maximum versions for this store. - int maxVersions = this.family.getMaxVersions(); + int maxVersions = getColumnFamilyDescriptor().getMaxVersions(); return wantedVersions > maxVersions ? 
maxVersions: wantedVersions; } @@ -2367,7 +2354,7 @@ public RegionCoprocessorHost getCoprocessorHost() { @Override public RegionInfo getRegionInfo() { - return this.fs.getRegionInfo(); + return getRegionFileSystem().getRegionInfo(); } @Override @@ -2509,7 +2496,8 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) List storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); HStore.this.storeSize.addAndGet(storeFile.getReader().length()); @@ -2559,7 +2547,7 @@ public boolean needsCompaction() { * @return cache configuration for this Store. */ public CacheConfig getCacheConfig() { - return this.cacheConf; + return storeContext.getCacheConf(); } public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); @@ -2573,12 +2561,12 @@ public CacheConfig getCacheConfig() { @Override public long heapSize() { MemStoreSize memstoreSize = this.memstore.size(); - return DEEP_OVERHEAD + memstoreSize.getHeapSize(); + return DEEP_OVERHEAD + memstoreSize.getHeapSize() + storeContext.heapSize(); } @Override public CellComparator getComparator() { - return comparator; + return storeContext.getComparator(); } public ScanInfo getScanInfo() { @@ -2652,7 +2640,7 @@ protected OffPeakHours getOffPeakHours() { public void onConfigurationChange(Configuration conf) { this.conf = new CompoundConfiguration() .add(conf) - .addBytesMap(family.getValues()); + .addBytesMap(getColumnFamilyDescriptor().getValues()); this.storeEngine.compactionPolicy.setConf(conf); this.offPeakHours = OffPeakHours.getInstance(conf); } @@ -2784,8 +2772,8 @@ private void removeCompactedfiles(Collection compactedfiles, boolean LOG.debug("Moving the files {} to archive", filesToRemove); // Only if this is successful it has to be removed try { - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), - filesToRemove); + getRegionFileSystem() + .removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); } catch (FailedArchiveException fae) { // Even if archiving some files failed, we still need to clear out any of the // files which were successfully archived. Otherwise we will receive a diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java new file mode 100644 index 000000000000..26233505db73 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.function.Supplier;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This carries the immutable information and references on some of the meta data about the HStore.
+ * This meta data can be used across the HFileWriter/Readers and other HStore consumers without the
+ * need of passing around the complete store.
+ */
+@InterfaceAudience.Private
+public final class StoreContext implements HeapSize {
+  public static final long FIXED_OVERHEAD = ClassSize.estimateBase(StoreContext.class, false);
+
+  private final int blockSize;
+  private final Encryption.Context encryptionContext;
+  private final CacheConfig cacheConf;
+  private final HRegionFileSystem regionFileSystem;
+  private final CellComparator comparator;
+  private final BloomType bloomFilterType;
+  private final Supplier<Collection<HStoreFile>> compactedFilesSupplier;
+  private final Supplier<InetSocketAddress[]> favoredNodesSupplier;
+  private final ColumnFamilyDescriptor family;
+  private final Path familyStoreDirectoryPath;
+  private final RegionCoprocessorHost coprocessorHost;
+
+  private StoreContext(Builder builder) {
+    this.blockSize = builder.blockSize;
+    this.encryptionContext = builder.encryptionContext;
+    this.cacheConf = builder.cacheConf;
+    this.regionFileSystem = builder.regionFileSystem;
+    this.comparator = builder.comparator;
+    this.bloomFilterType = builder.bloomFilterType;
+    this.compactedFilesSupplier = builder.compactedFilesSupplier;
+    this.favoredNodesSupplier = builder.favoredNodesSupplier;
+    this.family = builder.family;
+    this.familyStoreDirectoryPath = builder.familyStoreDirectoryPath;
+    this.coprocessorHost = builder.coprocessorHost;
+  }
+
+  public int getBlockSize() {
+    return blockSize;
+  }
+
+  public Encryption.Context getEncryptionContext() {
+    return encryptionContext;
+  }
+
+  public CacheConfig getCacheConf() {
+    return cacheConf;
+  }
+
+  public HRegionFileSystem getRegionFileSystem() {
+    return regionFileSystem;
+  }
+
+  public CellComparator getComparator() {
+    return comparator;
+  }
+
+  public BloomType getBloomFilterType() {
+    return bloomFilterType;
+  }
+
+  public Supplier<Collection<HStoreFile>> getCompactedFilesSupplier() {
+    return compactedFilesSupplier;
+  }
+
+  public InetSocketAddress[] getFavoredNodes() {
+    return favoredNodesSupplier.get();
+  }
+
+  public ColumnFamilyDescriptor getFamily() {
+    return family;
+  }
+
+  public Path getFamilyStoreDirectoryPath() {
+    return familyStoreDirectoryPath;
+  }
+
+  public RegionCoprocessorHost getCoprocessorHost() {
+    return coprocessorHost;
+  }
+
+  public static Builder getBuilder() {
+    return new Builder();
+  }
+
+  @Override
+  public long heapSize() {
+    return FIXED_OVERHEAD;
+  }
+
+  public static class Builder {
+    private int blockSize;
+    private Encryption.Context encryptionContext;
+    private CacheConfig cacheConf;
+    private HRegionFileSystem regionFileSystem;
+    private CellComparator comparator;
+    private BloomType bloomFilterType;
+    private Supplier<Collection<HStoreFile>> compactedFilesSupplier;
+    private Supplier<InetSocketAddress[]> favoredNodesSupplier;
+    private ColumnFamilyDescriptor family;
+    private Path familyStoreDirectoryPath;
+    private RegionCoprocessorHost coprocessorHost;
+
+    public Builder withBlockSize(int blockSize) {
+      this.blockSize = blockSize;
+      return this;
+    }
+
+    public Builder withEncryptionContext(Encryption.Context encryptionContext) {
+      this.encryptionContext = encryptionContext;
+      return this;
+    }
+
+    public Builder withCacheConfig(CacheConfig cacheConf) {
+      this.cacheConf = cacheConf;
+      return this;
+    }
+
+    public Builder withRegionFileSystem(HRegionFileSystem regionFileSystem) {
+      this.regionFileSystem = regionFileSystem;
+      return this;
+    }
+
+    public Builder withCellComparator(CellComparator comparator) {
+      this.comparator = comparator;
+      return this;
+    }
+
+    public Builder withBloomType(BloomType bloomFilterType) {
+      this.bloomFilterType = bloomFilterType;
+      return this;
+    }
+
+    public Builder withCompactedFilesSupplier(Supplier<Collection<HStoreFile>>
+        compactedFilesSupplier) {
+      this.compactedFilesSupplier = compactedFilesSupplier;
+      return this;
+    }
+
+    public Builder withFavoredNodesSupplier(Supplier<InetSocketAddress[]> favoredNodesSupplier) {
+      this.favoredNodesSupplier = favoredNodesSupplier;
+      return this;
+    }
+
+    public Builder withColumnFamilyDescriptor(ColumnFamilyDescriptor family) {
+      this.family = family;
+      return this;
+    }
+
+    public Builder withFamilyStoreDirectoryPath(Path familyStoreDirectoryPath) {
+      this.familyStoreDirectoryPath = familyStoreDirectoryPath;
+      return this;
+    }
+
+    public Builder withRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) {
+      this.coprocessorHost = coprocessorHost;
+      return this;
+    }
+
+    public StoreContext build() {
+      return new StoreContext(this);
+    }
+  }
+
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 0e4f6c2bb8a4..ac5955feca7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -24,9 +24,13 @@
 import java.util.OptionalInt;
 import java.util.OptionalLong;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -136,4 +140,25 @@ static Optional<byte[]> getSplitPoint(Collection<HStoreFile> storefiles,
     return largestFile.isPresent() ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator)
       : Optional.empty();
   }
+
+  /**
+   * Returns the configured checksum algorithm.
+   * @param conf The configuration
+   * @return The checksum algorithm that is set in the configuration
+   */
+  public static ChecksumType getChecksumType(Configuration conf) {
+    return ChecksumType.nameToType(
+      conf.get(HConstants.CHECKSUM_TYPE_NAME, ChecksumType.getDefaultChecksumType().getName()));
+  }
+
+  /**
+   * Returns the configured bytesPerChecksum value.
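+   * Falls back to {@link HFile#DEFAULT_BYTES_PER_CHECKSUM} when
+   * {@link HConstants#BYTES_PER_CHECKSUM} is not set in the configuration.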
+ * @param conf The configuration + * @return The bytesPerChecksum that is set in the configuration + */ + public static int getBytesPerChecksum(Configuration conf) { + return conf.getInt(HConstants.BYTES_PER_CHECKSUM, + HFile.DEFAULT_BYTES_PER_CHECKSUM); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index ec9a59c7bf5a..b0b086e145a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -83,9 +83,9 @@ import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.FsDelegationToken; import org.apache.hadoop.hbase.util.Bytes; @@ -749,8 +749,8 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Algorithm compression = familyDescriptor.getCompressionType(); BloomType bloomFilterType = familyDescriptor.getBloomFilterType(); HFileContext hFileContext = new HFileContextBuilder().withCompression(compression) - .withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blocksize) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize) .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) .build(); halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index 9623bd1c7220..50bc5fe62fb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.CellSet; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.EntryBuffers.RegionEntryBuffer; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -199,8 +199,8 @@ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String r new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) .withOutputDir(outputDir); HFileContext hFileContext = new HFileContextBuilder(). - withChecksumType(HStore.getChecksumType(walSplitter.conf)). - withBytesPerCheckSum(HStore.getBytesPerChecksum(walSplitter.conf)). + withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)). + withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)). withCellComparator(isMetaTable? 
MetaCellComparator.META_COMPARATOR: CellComparatorImpl.COMPARATOR).build(); return writerBuilder.withFileContext(hFileContext).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 3d713052559e..3f326a30cfdf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.MemStoreCompactor; import org.apache.hadoop.hbase.regionserver.MutableSegment; import org.apache.hadoop.hbase.regionserver.Segment; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker.NonSyncTimeRangeTracker; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker.SyncTimeRangeTracker; import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector; @@ -606,7 +607,7 @@ public void testObjectSize() throws IOException { @Test public void testAutoCalcFixedOverHead() { Class[] classList = new Class[] { HFileContext.class, HRegion.class, BlockCacheKey.class, - HFileBlock.class, HStore.class, LruBlockCache.class }; + HFileBlock.class, HStore.class, LruBlockCache.class, StoreContext.class }; for (Class cl : classList) { // do estimate in advance to ensure class is loaded ClassSize.estimateBase(cl, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index b56f96a51149..74f240a60198 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -7645,7 +7645,7 @@ protected List doCompaction(CompactionRequestImpl cr, LOG.warn("hbase.hstore.compaction.complete is set to false"); List sfs = new ArrayList<>(newFiles.size()); final boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true; for (Path newFile : newFiles) { // Create storefile around what we wrote with a reader on it. 
HStoreFile sf = createStoreFileAndReader(newFile); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java index 12cf57671f9c..88f201efff6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java @@ -221,8 +221,8 @@ private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception { .withIncludesTags(true) .withCompression(compression) .withCompressTags(family.isCompressTags()) - .withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) .withBlockSize(family.getBlocksize()) .withHBaseCheckSum(true) .withDataBlockEncoding(family.getDataBlockEncoding()) From c218e576fe54df208e277365f1ac24f993f2a4b1 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 12 Jan 2021 16:08:54 +0800 Subject: [PATCH 322/769] Revert "HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml" This reverts commit 49aba571813f649a2ff4482a2209ee9910cc72c3. --- .../src/main/resources/hbase-default.xml | 4 +- .../hadoop/hbase/TestHBaseConfiguration.java | 17 -------- .../src/test/resources/hdfs-default.xml | 42 ------------------- .../src/test/resources/hdfs-scr-enabled.xml | 42 ------------------- 4 files changed, 2 insertions(+), 103 deletions(-) delete mode 100644 hbase-common/src/test/resources/hdfs-default.xml delete mode 100644 hbase-common/src/test/resources/hdfs-scr-enabled.xml diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 20f3881edb2c..9092dd147198 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the important. dfs.client.read.shortcircuit - + false If set to true, this configuration parameter enables short-circuit local reads. @@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the important. 
dfs.domain.socket.path - + none This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients, if diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 1144f1daf351..6a0b4283ed03 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -115,23 +115,6 @@ public void testSecurityConfCaseInsensitive() { conf.set("hbase.security.authentication", "KERBeros"); Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); } - - @Test - public void testGetConfigOfShortcircuitRead() throws Exception { - Configuration conf = HBaseConfiguration.create(); - Configuration.addDefaultResource("hdfs-default.xml"); - assertEquals("hdfs-default.xml", - conf.getPropertySources("dfs.client.read.shortcircuit")[0]); - assertEquals("false", conf.get("dfs.client.read.shortcircuit")); - assertNull(conf.get("dfs.domain.socket.path")); - Configuration.addDefaultResource("hdfs-scr-enabled.xml"); - assertEquals("hdfs-scr-enabled.xml", - conf.getPropertySources("dfs.client.read.shortcircuit")[0]); - assertEquals("hdfs-scr-enabled.xml", - conf.getPropertySources("dfs.domain.socket.path")[0]); - assertEquals("true", conf.get("dfs.client.read.shortcircuit")); - assertEquals("/var/lib/hadoop-hdfs/dn_socket", conf.get("dfs.domain.socket.path")); - } private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = diff --git a/hbase-common/src/test/resources/hdfs-default.xml b/hbase-common/src/test/resources/hdfs-default.xml deleted file mode 100644 index fdb3c36edc87..000000000000 --- a/hbase-common/src/test/resources/hdfs-default.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - dfs.client.read.shortcircuit - false - - If set to true, this configuration parameter enables short-circuit local - reads. - - - - dfs.domain.socket.path - - - Optional. This is a path to a UNIX domain socket that will be used for - communication between the DataNode and local HDFS clients. - If the string "_PORT" is present in this path, it will be replaced by the - TCP port of the DataNode. - - - diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml deleted file mode 100644 index 8594494782c5..000000000000 --- a/hbase-common/src/test/resources/hdfs-scr-enabled.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - dfs.client.read.shortcircuit - true - - If set to true, this configuration parameter enables short-circuit local - reads. - - - - dfs.domain.socket.path - /var/lib/hadoop-hdfs/dn_socket - - Optional. This is a path to a UNIX domain socket that will be used for - communication between the DataNode and local HDFS clients. - If the string "_PORT" is present in this path, it will be replaced by the - TCP port of the DataNode. 
- - - From a3913a39c3d1a542a31658103be35185a397016a Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 13 Jan 2021 05:01:26 +0530 Subject: [PATCH 323/769] HBASE-25211 : Rack awareness in RegionMover (#2795) Signed-off-by: Andrew Purtell --- .../hadoop/hbase/master/RackManager.java | 3 - .../apache/hadoop/hbase/util/RegionMover.java | 68 ++++++- .../hadoop/hbase/util/TestRegionMover3.java | 188 ++++++++++++++++++ 3 files changed, 254 insertions(+), 5 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java index 3ed20065a672..54ccac0cb629 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java @@ -22,8 +22,6 @@ import java.util.List; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ -36,7 +34,6 @@ */ @InterfaceAudience.Private public class RackManager { - private static final Logger LOG = LoggerFactory.getLogger(RackManager.class); public static final String UNKNOWN_RACK = "Unknown Rack"; private DNSToSwitchMapping switchMapping; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 08042efda68f..210e9e17a39f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.DoNotRetryRegionException; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; @@ -86,6 +87,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { public static final int DEFAULT_MOVE_RETRIES_MAX = 5; public static final int DEFAULT_MOVE_WAIT_MAX = 60; public static final int DEFAULT_SERVERSTART_WAIT_MAX = 180; + private final RackManager rackManager; private static final Logger LOG = LoggerFactory.getLogger(RegionMover.class); @@ -114,9 +116,16 @@ private RegionMover(RegionMoverBuilder builder) throws IOException { setConf(builder.conf); this.conn = ConnectionFactory.createConnection(conf); this.admin = conn.getAdmin(); + // Only while running unit tests, builder.rackManager will not be null for the convenience of + // providing custom rackManager. Otherwise for regular workflow/user triggered action, + // builder.rackManager is supposed to be null. Hence, setter of builder.rackManager is + // provided as @InterfaceAudience.Private and it is commented that this is just + // to be used by unit test. + rackManager = builder.rackManager == null ? 
new RackManager(conf) : builder.rackManager; } private RegionMover() { + rackManager = new RackManager(conf); } @Override @@ -143,6 +152,7 @@ public static class RegionMoverBuilder { @InterfaceAudience.Private final int port; private final Configuration conf; + private RackManager rackManager; public RegionMoverBuilder(String hostname) { this(hostname, createConf()); @@ -245,6 +255,19 @@ public RegionMoverBuilder timeout(int timeout) { return this; } + /** + * Set specific rackManager implementation. + * This setter method is for testing purpose only. + * + * @param rackManager rackManager impl + * @return RegionMoverBuilder object + */ + @InterfaceAudience.Private + public RegionMoverBuilder rackManager(RackManager rackManager) { + this.rackManager = rackManager; + return this; + } + /** * This method builds the appropriate RegionMover object which can then be used to load/unload * using load and unload methods @@ -328,9 +351,31 @@ private void loadRegions(List regionsToMove) * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions * to hostnames provided in {@link #designatedFile} + * * @return true if unloading succeeded, false otherwise */ public boolean unload() throws InterruptedException, ExecutionException, TimeoutException { + return unloadRegions(false); + } + + /** + * Unload regions from given {@link #hostname} using ack/noAck mode and {@link #maxthreads}.In + * noAck mode we do not make sure that region is successfully online on the target region + * server,hence it is best effort.We do not unload regions to hostnames given in + * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions + * to hostnames provided in {@link #designatedFile}. + * While unloading regions, destination RegionServers are selected from different rack i.e + * regions should not move to any RegionServers that belong to same rack as source RegionServer. + * + * @return true if unloading succeeded, false otherwise + */ + public boolean unloadFromRack() + throws InterruptedException, ExecutionException, TimeoutException { + return unloadRegions(true); + } + + private boolean unloadRegions(boolean unloadFromRack) throws InterruptedException, + ExecutionException, TimeoutException { deleteFile(this.filename); ExecutorService unloadPool = Executors.newFixedThreadPool(1); Future unloadTask = unloadPool.submit(() -> { @@ -355,6 +400,23 @@ public boolean unload() throws InterruptedException, ExecutionException, Timeout // Remove RS present in the exclude file includeExcludeRegionServers(excludeFile, regionServers, false); + if (unloadFromRack) { + // remove regionServers that belong to same rack (as source host) since the goal is to + // unload regions from source regionServer to destination regionServers + // that belong to different rack only. 
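+      // For example: if the source server resolves to /rack-a, every candidate that
+      // also resolves to /rack-a is dropped below, leaving only servers on other racks
+      // (/rack-b, /rack-c, ...) as possible unload targets.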
+ String sourceRack = rackManager.getRack(server); + List racks = rackManager.getRack(regionServers); + Iterator iterator = regionServers.iterator(); + int i = 0; + while (iterator.hasNext()) { + iterator.next(); + if (racks.size() > i && racks.get(i) != null && racks.get(i).equals(sourceRack)) { + iterator.remove(); + } + i++; + } + } + // Remove decommissioned RS Set decommissionedRS = new HashSet<>(admin.listDecommissionedRegionServers()); if (CollectionUtils.isNotEmpty(decommissionedRS)) { @@ -651,7 +713,7 @@ private void stripMaster(List regionServers) throws IOException { private ServerName stripServer(List regionServers, String hostname, int port) { for (Iterator iter = regionServers.iterator(); iter.hasNext();) { ServerName server = iter.next(); - if (server.getAddress().getHostname().equalsIgnoreCase(hostname) && + if (server.getAddress().getHostName().equalsIgnoreCase(hostname) && server.getAddress().getPort() == port) { iter.remove(); return server; @@ -663,7 +725,7 @@ private ServerName stripServer(List regionServers, String hostname, @Override protected void addOptions() { this.addRequiredOptWithArg("r", "regionserverhost", "region server |"); - this.addRequiredOptWithArg("o", "operation", "Expected: load/unload"); + this.addRequiredOptWithArg("o", "operation", "Expected: load/unload/unload_from_rack"); this.addOptWithArg("m", "maxthreads", "Define the maximum number of threads to use to unload and reload the regions"); this.addOptWithArg("x", "excludefile", @@ -716,6 +778,8 @@ protected int doWork() throws Exception { success = rm.load(); } else if (loadUnload.equalsIgnoreCase("unload")) { success = rm.unload(); + } else if (loadUnload.equalsIgnoreCase("unload_from_rack")) { + success = rm.unloadFromRack(); } else { printUsage(); success = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java new file mode 100644 index 000000000000..1903fa6bf5b6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.RackManager; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + + +@Category({ MiscTests.class, LargeTests.class}) +public class TestRegionMover3 { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionMover3.class); + + @Rule + public TestName name = new TestName(); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static ServerName rs0; + private static ServerName rs1; + private static ServerName rs2; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(3); + MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + rs0 = cluster.getRegionServer(0).getServerName(); + rs1 = cluster.getRegionServer(1).getServerName(); + rs2 = cluster.getRegionServer(2).getServerName(); + TEST_UTIL.getAdmin().balancerSwitch(false, true); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setUp() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + int startKey = 0; + int endKey = 80000; + TEST_UTIL.getAdmin().createTable(tableDesc, Bytes.toBytes(startKey), Bytes.toBytes(endKey), 9); + } + + @Test + public void testRegionUnloadWithRack() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + Admin admin = TEST_UTIL.getAdmin(); + Table table = TEST_UTIL.getConnection().getTable(tableName); + List puts = IntStream.range(10, 50000) + .mapToObj(i -> new Put(Bytes.toBytes(i)) + .addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))) + .collect(Collectors.toList()); + table.put(puts); + admin.flush(tableName); + admin.compact(tableName); + Thread.sleep(3000); + HRegionServer hRegionServer0 = cluster.getRegionServer(0); + HRegionServer hRegionServer1 = cluster.getRegionServer(1); + HRegionServer hRegionServer2 = cluster.getRegionServer(2); + int numRegions0 = hRegionServer0.getNumberOfOnlineRegions(); + int numRegions1 = 
hRegionServer1.getNumberOfOnlineRegions(); + int numRegions2 = hRegionServer2.getNumberOfOnlineRegions(); + + Assert.assertTrue(numRegions0 >= 3); + Assert.assertTrue(numRegions1 >= 3); + Assert.assertTrue(numRegions2 >= 3); + int totalRegions = numRegions0 + numRegions1 + numRegions2; + + // source RS: rs0 + String sourceRSName = rs0.getAddress().toString(); + + // move all regions from rs1 to rs0 + for (HRegion region : hRegionServer1.getRegions()) { + TEST_UTIL.getAdmin().move(region.getRegionInfo().getEncodedNameAsBytes(), rs0); + } + TEST_UTIL.waitFor(5000, () -> { + int newNumRegions0 = hRegionServer0.getNumberOfOnlineRegions(); + int newNumRegions1 = hRegionServer1.getNumberOfOnlineRegions(); + return newNumRegions1 == 0 && newNumRegions0 == (numRegions0 + numRegions1); + }); + + // regionMover obj on rs0. While unloading regions from rs0 + // with default rackManager, which resolves "/default-rack" for each server, no region + // is moved while using unloadFromRack() as all rs belong to same rack. + RegionMover.RegionMoverBuilder rmBuilder = + new RegionMover.RegionMoverBuilder(sourceRSName, TEST_UTIL.getConfiguration()) + .ack(true) + .maxthreads(8); + try (RegionMover regionMover = rmBuilder.build()) { + regionMover.unloadFromRack(); + int newNumRegions0 = hRegionServer0.getNumberOfOnlineRegions(); + int newNumRegions1 = hRegionServer1.getNumberOfOnlineRegions(); + int newNumRegions2 = hRegionServer2.getNumberOfOnlineRegions(); + Assert.assertEquals(0, newNumRegions1); + Assert.assertEquals(totalRegions, newNumRegions0 + newNumRegions2); + } + + // use custom rackManager, which resolves "rack-1" for rs0 and rs1, + // while "rack-2" for rs2. Hence, unloadFromRack() from rs0 should move all + // regions that belong to rs0 to rs2 only, and nothing should be moved to rs1 + // as rs0 and rs1 belong to same rack. + rmBuilder.rackManager(new MockRackManager()); + try (RegionMover regionMover = rmBuilder.build()) { + regionMover.unloadFromRack(); + int newNumRegions0 = hRegionServer0.getNumberOfOnlineRegions(); + int newNumRegions1 = hRegionServer1.getNumberOfOnlineRegions(); + int newNumRegions2 = hRegionServer2.getNumberOfOnlineRegions(); + Assert.assertEquals(0, newNumRegions0); + Assert.assertEquals(0, newNumRegions1); + Assert.assertEquals(totalRegions, newNumRegions2); + } + + } + + private static class MockRackManager extends RackManager { + + private static final String RACK_2 = "rack-2"; + private static final String RACK_1 = "rack-1"; + + @Override + public String getRack(ServerName server) { + return rs2.equals(server) ? RACK_2 : RACK_1; + } + + @Override + public List getRack(List servers) { + List racks = new ArrayList<>(); + servers.forEach(serverName -> { + if (rs2.equals(serverName)) { + racks.add(RACK_2); + } else { + racks.add(RACK_1); + } + }); + return racks; + } + } + +} From 13dd54af8960212fcbb0f69d5d6fb666c471c50d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 13 Jan 2021 09:56:58 +0800 Subject: [PATCH 324/769] HBASE-25476 Enable error prone check in pre commit (#2860) Signed-off-by: Guanghao Zhang --- dev-support/Jenkinsfile_GitHub | 4 +++- dev-support/jenkins_precommit_github_yetus.sh | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index d25386717739..a725f1dbce4f 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -38,7 +38,7 @@ pipeline { YETUS_DRIVER_REL = "${SRC_REL}/dev-support/jenkins_precommit_github_yetus.sh" // Branch or tag name. 
Yetus release tags are 'rel/X.Y.Z' YETUS_VERSION = 'rel/0.12.0' - GENERAL_CHECK_PLUGINS = 'all,-compile,-javac,-javadoc,-jira,-shadedjars,-unit' + GENERAL_CHECK_PLUGINS = 'all,-javadoc,-jira,-shadedjars,-unit' JDK_SPECIFIC_PLUGINS = 'compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars,unit' // output from surefire; sadly the archive function in yetus only works on file names. ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' @@ -168,6 +168,7 @@ pipeline { BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}" DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}" YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}" + SKIP_ERRORPRONE = true } steps { dir("${SOURCEDIR}") { @@ -268,6 +269,7 @@ pipeline { BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}" DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}" YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}" + SKIP_ERRORPRONE = true } steps { dir("${SOURCEDIR}") { diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh index 1c489d6f28bb..5bb2b1b755a4 100755 --- a/dev-support/jenkins_precommit_github_yetus.sh +++ b/dev-support/jenkins_precommit_github_yetus.sh @@ -122,7 +122,10 @@ YETUS_ARGS+=("--whitespace-tabs-ignore-list=.*/generated/.*") YETUS_ARGS+=("--tests-filter=${TESTS_FILTER}") YETUS_ARGS+=("--personality=${SOURCEDIR}/dev-support/hbase-personality.sh") YETUS_ARGS+=("--quick-hadoopcheck") -YETUS_ARGS+=("--skip-errorprone") +if [[ "${SKIP_ERRORPRONE}" = "true" ]]; then + # skip error prone + YETUS_ARGS+=("--skip-errorprone") +fi # effectively treat dev-support as a custom maven module YETUS_ARGS+=("--skip-dirs=dev-support") # For testing with specific hadoop version. Activates corresponding profile in maven runs. From dfefff7e59391dad26dd8851c6e81fdc98c4c9fe Mon Sep 17 00:00:00 2001 From: caroliney14 Date: Tue, 12 Jan 2021 22:40:12 -0800 Subject: [PATCH 325/769] HBASE-25329 Dump ritsOverThreshold in logs (#2761) Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani --- .../hadoop/hbase/master/assignment/AssignmentManager.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 4d0e165456d1..1eb39028f454 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -1425,6 +1425,13 @@ protected void update(final AssignmentManager am) { this.statTimestamp = EnvironmentEdgeManager.currentTime(); update(regionStates.getRegionsStateInTransition(), statTimestamp); update(regionStates.getRegionFailedOpen(), statTimestamp); + + if (LOG.isDebugEnabled() && ritsOverThreshold != null && !ritsOverThreshold.isEmpty()) { + LOG.debug("RITs over threshold: {}", + ritsOverThreshold.entrySet().stream() + .map(e -> e.getKey() + ":" + e.getValue().getState().name()) + .collect(Collectors.joining("\n"))); + } } private void update(final Collection regions, final long currentTime) { From babcebd7c2e256086cfa4c6d6108e95ce6e7ae35 Mon Sep 17 00:00:00 2001 From: Mohammad Arshad Date: Wed, 13 Jan 2021 23:19:38 +0530 Subject: [PATCH 326/769] HBASE-25503: HBase code download is failing on windows with invalid path error (#2880) --- ...HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf} | Bin 1 file changed, 0 insertions(+), 0 deletions(-) rename 
dev-support/design-docs/{HBASE-18070-ROOT_hbase:meta_Region_Replicas.pdf => HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf} (100%) diff --git a/dev-support/design-docs/HBASE-18070-ROOT_hbase:meta_Region_Replicas.pdf b/dev-support/design-docs/HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf similarity index 100% rename from dev-support/design-docs/HBASE-18070-ROOT_hbase:meta_Region_Replicas.pdf rename to dev-support/design-docs/HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf From 48d9d196dc52a82de8328fd58e21a0257ce3008b Mon Sep 17 00:00:00 2001 From: Pankaj Date: Thu, 14 Jan 2021 00:31:26 +0530 Subject: [PATCH 327/769] HBASE-25502 IntegrationTestMTTR fails with TableNotFoundException (#2879) --- .../java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index d15a9d650526..d9d8cbad39af 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -233,8 +233,7 @@ private static void setupTables() throws IOException { } // Create the table. If this fails then fail everything. - TableDescriptor tableDescriptor = util.getAdmin().getDescriptor(tableName); - TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor); + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); // Make the max file size huge so that splits don't happen during the test. builder.setMaxFileSize(Long.MAX_VALUE); From 4caab90aa71a59a0f17c5fa969f8066da37d0196 Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Thu, 14 Jan 2021 03:26:32 +0800 Subject: [PATCH 328/769] HBASE-25495 fix comment error of admin.rb (#2873) Co-authored-by: stevenxi Signed-off-by: Viraj Jasani --- hbase-shell/src/main/ruby/hbase/admin.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 6228ad78486d..d874d6337b84 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1414,7 +1414,7 @@ def create_namespace(namespace_name, *args) #---------------------------------------------------------------------------------------------- # modify a namespace def alter_namespace(namespace_name, *args) - # Fail if table name is not a string + # Fail if namespace name is not a string raise(ArgumentError, 'Namespace name must be of type String') unless namespace_name.is_a?(String) nsd = @admin.getNamespaceDescriptor(namespace_name) From 3488c44a21612aae1835fc3e91a4a12ed2abb8b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=94=B3=E8=83=9C=E5=88=A9?= <48829688+shenshengli@users.noreply.github.com> Date: Tue, 12 Jan 2021 09:06:13 -0500 Subject: [PATCH 329/769] HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml Revert of the revert -- re-applying HBASE-25449 with a change of renaming the test hdfs XML configuration file as it was adversely affecting tests using MiniDFS This reverts commit c218e576fe54df208e277365f1ac24f993f2a4b1. 
Co-authored-by: Josh Elser Signed-off-by: Peter Somogyi Signed-off-by: Michael Stack Signed-off-by: Duo Zhang --- .../src/main/resources/hbase-default.xml | 4 +- .../hadoop/hbase/TestHBaseConfiguration.java | 17 ++++++++ .../src/test/resources/hdfs-scr-disabled.xml | 42 +++++++++++++++++++ .../src/test/resources/hdfs-scr-enabled.xml | 42 +++++++++++++++++++ 4 files changed, 103 insertions(+), 2 deletions(-) create mode 100644 hbase-common/src/test/resources/hdfs-scr-disabled.xml create mode 100644 hbase-common/src/test/resources/hdfs-scr-enabled.xml diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 9092dd147198..20f3881edb2c 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the important. dfs.client.read.shortcircuit - false + If set to true, this configuration parameter enables short-circuit local reads. @@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the important. dfs.domain.socket.path - none + This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients, if diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 6a0b4283ed03..ffa94ba2d59f 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -116,6 +116,23 @@ public void testSecurityConfCaseInsensitive() { Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); } + @Test + public void testGetConfigOfShortcircuitRead() throws Exception { + Configuration conf = HBaseConfiguration.create(); + Configuration.addDefaultResource("hdfs-scr-disabled.xml"); + assertEquals("hdfs-scr-disabled.xml", + conf.getPropertySources("dfs.client.read.shortcircuit")[0]); + assertEquals("false", conf.get("dfs.client.read.shortcircuit")); + assertNull(conf.get("dfs.domain.socket.path")); + Configuration.addDefaultResource("hdfs-scr-enabled.xml"); + assertEquals("hdfs-scr-enabled.xml", + conf.getPropertySources("dfs.client.read.shortcircuit")[0]); + assertEquals("hdfs-scr-enabled.xml", + conf.getPropertySources("dfs.domain.socket.path")[0]); + assertEquals("true", conf.get("dfs.client.read.shortcircuit")); + assertEquals("/var/lib/hadoop-hdfs/dn_socket", conf.get("dfs.domain.socket.path")); + } + private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory"; diff --git a/hbase-common/src/test/resources/hdfs-scr-disabled.xml b/hbase-common/src/test/resources/hdfs-scr-disabled.xml new file mode 100644 index 000000000000..fdb3c36edc87 --- /dev/null +++ b/hbase-common/src/test/resources/hdfs-scr-disabled.xml @@ -0,0 +1,42 @@ + + + + + + + dfs.client.read.shortcircuit + false + + If set to true, this configuration parameter enables short-circuit local + reads. + + + + dfs.domain.socket.path + + + Optional. This is a path to a UNIX domain socket that will be used for + communication between the DataNode and local HDFS clients. + If the string "_PORT" is present in this path, it will be replaced by the + TCP port of the DataNode. 
+ + + diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml new file mode 100644 index 000000000000..8594494782c5 --- /dev/null +++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml @@ -0,0 +1,42 @@ + + + + + + + dfs.client.read.shortcircuit + true + + If set to true, this configuration parameter enables short-circuit local + reads. + + + + dfs.domain.socket.path + /var/lib/hadoop-hdfs/dn_socket + + Optional. This is a path to a UNIX domain socket that will be used for + communication between the DataNode and local HDFS clients. + If the string "_PORT" is present in this path, it will be replaced by the + TCP port of the DataNode. + + + From 3cc24683001998f995d22591ce8d1c74f67fb9f7 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 14 Jan 2021 11:21:09 -0800 Subject: [PATCH 330/769] HBASE-25375 Provide a VM-based release environment (#2754) This adds a Vagrantfile and supporting automation that creates a virtual machine environment suitable for running the create-release scripting. Signed-off-by: Duo Zhang Signed-off-by: Michael Stack --- dev-support/release-vm/.gitignore | 3 + dev-support/release-vm/README.md | 141 ++++++++++++++++++ dev-support/release-vm/Vagrantfile | 50 +++++++ dev-support/release-vm/provision/focal.sh | 25 ++++ .../release-vm/puppet/production/.gitignore | 3 + .../production/.librarian/puppet/config | 21 +++ .../release-vm/puppet/production/Puppetfile | 27 ++++ .../puppet/production/data/nodes/rmvm.yaml | 31 ++++ .../puppet/production/environment.conf | 20 +++ .../release-vm/puppet/production/hiera.yaml | 23 +++ .../puppet/production/manifests/default.pp | 44 ++++++ 11 files changed, 388 insertions(+) create mode 100644 dev-support/release-vm/.gitignore create mode 100644 dev-support/release-vm/README.md create mode 100644 dev-support/release-vm/Vagrantfile create mode 100755 dev-support/release-vm/provision/focal.sh create mode 100644 dev-support/release-vm/puppet/production/.gitignore create mode 100644 dev-support/release-vm/puppet/production/.librarian/puppet/config create mode 100644 dev-support/release-vm/puppet/production/Puppetfile create mode 100644 dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml create mode 100644 dev-support/release-vm/puppet/production/environment.conf create mode 100644 dev-support/release-vm/puppet/production/hiera.yaml create mode 100644 dev-support/release-vm/puppet/production/manifests/default.pp diff --git a/dev-support/release-vm/.gitignore b/dev-support/release-vm/.gitignore new file mode 100644 index 000000000000..49b3eb5bd9e3 --- /dev/null +++ b/dev-support/release-vm/.gitignore @@ -0,0 +1,3 @@ +.vagrant/ +*.log +*.patch diff --git a/dev-support/release-vm/README.md b/dev-support/release-vm/README.md new file mode 100644 index 000000000000..74bb4392d2eb --- /dev/null +++ b/dev-support/release-vm/README.md @@ -0,0 +1,141 @@ + + +# HBase Release Env + +This is a vagrant project that provides a virtual machine environment suitable +for running an Apache HBase release. + +Requires: +* [VirtualBox](http://virtualbox.org) +* [Vagrant](http://virtualbox.org) +* The private portion of your signing key avilable in the local GPG agent +* The private portion of your Github authentication key available in either the local GPG agent or + local SSH agent + +## Usage + +Unlock the local keyring before proceeding (this should prompt you for your GPG passphrase). 
For +example, assuming you have an authentication key configured in your keyring, this will do the +trick. + +All terminal commands used below are assumed to be run with the current working directory as the +location containing the `Vagrantfile`. + +The term "Host" is used to mean the environment that runs the Vagrant process. The term "Guest" is +used to mean the virtual machine managed by the Host. + +### Ensure credentials work from the Host OS + +The ssh- and gpg-agent forwarding configuration used here assumes that your credentials work +on the Host. Verify both are working before you proceed with the Guest. Additionally, using the +credentials requires you to unlock the respective keyring, the state of which is persisted by the +agent process or processes running on the Host. + +See instructions in [`create-release`](../create-release/README.txt) regarding proper +configuration of ssh- and gpg-agents. + +Assuming the git repo origin is on GitHub, the following command will ensure that your ssh +credentials are working. On the Host, run: + +```sh +host:~$ ssh -T git@github.com +Hi ! You've successfully authenticated, but GitHub does not provide shell access. +``` + +Likewise, ensure you have an encryption key that can be used to sign a file. Again, on the Host, +run: + +```sh +host:~$ gpg --detach --armor --sign Vagrantfile +host:~$ gpg --verify Vagrantfile.asc +gpg: assuming signed data in 'Vagrantfile' +... +host:~$ rm Vagrantfile.asc +``` + +### Make public keyring available to the VM + +Export the public portion of your signing credentials where the Guest can access it. Vagrant +(+VirtualBox) shares the directory of the `Vagrantfile` with the Linux Guest via the `/vagrant` +mount point. Any files present in this working directory on the Host are available to the Guest. + +From the Host, run: + +```sh +host:~$ gpg --export @apache.org > gpg..apache.pub +``` + +### Launch the Guest VM + +Launch the Guest VM by running + +```sh +host:~$ vagrant up +``` + +If anything about the Vagrant or VirtualBox environment have changed since you last used this VM, +it's best to `vagrant destroy -f` all local state and `vagrant up` a fresh instance. + +### Verify the Guest VM + +Connect to the Guest. This should forward your ssh- and gpg-agent session, as configured in the +`Vagrantfile`. + +```sh +host:~$ vagrant ssh +``` + +Now that you're in the Guest VM, be sure that all `gpg` command you issue include the +`--no-autostart` flag. This ensures that the `gpg` process in the Guest communicates with the +agent running on the Host OS rather than launching its own process on the Guest OS. + +From the Guest, verify that ssh-agent forwarding is working, using the same test performed on the +Host, + +```sh +guest:~$ ssh -T git@github.com +Hi ! You've successfully authenticated, but GitHub does not provide shell access. +``` + +From the Guest, import your exported public identity and verify the gpg-agent passthrough is +working correctly. + +```sh +guest:~$ gpg --no-autostart --import /vagrant/gpg..apache.pub +... +gpg: Total number processed: 1 +gpg: imported: 1 +guest:~$ gpg --no-autostart --detach --armor --sign repos/hbase/pom.xml +guest:~$ gpg --no-autostart --verify repos/hbase/pom.xml.asc +gpg: assuming signed data in 'repos/hbase/pom.xml' +... +guest:~$ rm repos/hbase/pom.xml.asc +``` + +### Build a Release Candidate + +Finally, you can initiate the release build. 
Follow the instructions in +[`create-release`](../create-release/README.txt), i.e., + +```sh +guest:~$ mkdir ~/build-2.3.1-rc0 +guest:~$ cd repos/hbase +guest:~/repos/hbase$ ./dev-support/create-release/do-release-docker.sh -d ~/build-2.3.1-rc0/ ... +``` diff --git a/dev-support/release-vm/Vagrantfile b/dev-support/release-vm/Vagrantfile new file mode 100644 index 000000000000..e6a9a74b10ff --- /dev/null +++ b/dev-support/release-vm/Vagrantfile @@ -0,0 +1,50 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +Vagrant.configure("2") do |config| + + config.vm.define "rmvm" do |rmvm| + rmvm.vm.box = "ubuntu/focal64" + rmvm.vm.hostname = "rmvm" + + rmvm.vm.provision "shell", path: "provision/focal.sh", run: "once" + + rmvm.vm.provision "puppet", run: "always" do |puppet| + puppet.environment = "production" + puppet.environment_path = "puppet" + puppet.working_directory = "/tmp/vagrant-puppet" + puppet.options = "--test" + end + + rmvm.vm.provider "virtualbox" do |vb| + vb.name = "rmvm" + vb.cpus = 2 + vb.memory = (4 * 1024).to_s + end + end + + # pass through ssh-agent for github interaction + config.ssh.forward_agent = true + # pass through gpg-agent for artifact signing + config.ssh.extra_args = [ + "-R", "/run/user/1000/gnupg/S.gpg-agent:#{%x(gpgconf --list-dirs agent-extra-socket).strip}", + "-R", "/run/user/1000/gnupg/S.gpg-agent.extra:#{%x(gpgconf --list-dir agent-extra-socket).strip}", + ] +end diff --git a/dev-support/release-vm/provision/focal.sh b/dev-support/release-vm/provision/focal.sh new file mode 100755 index 000000000000..8dc30bc0a2ba --- /dev/null +++ b/dev-support/release-vm/provision/focal.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Bootstrap provisioner for a Ubuntu Bionic host. 
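+# (note) Run once automatically by `vagrant up` through the shell provisioner
+# declared in the Vagrantfile; it only installs puppet and librarian-puppet and
+# resolves the Puppetfile, leaving the real guest configuration to the puppet
+# provisioner.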
+# + +sudo apt-get update -qq +# puppet lets us manage the host, librarian-puppet lets us download puppet libraries +sudo apt-get install -y --no-install-recommends puppet librarian-puppet +cd /tmp/vagrant-puppet/environments/production && sudo librarian-puppet install --verbose diff --git a/dev-support/release-vm/puppet/production/.gitignore b/dev-support/release-vm/puppet/production/.gitignore new file mode 100644 index 000000000000..2df0470bce95 --- /dev/null +++ b/dev-support/release-vm/puppet/production/.gitignore @@ -0,0 +1,3 @@ +.tmp/ +modules/ +**/*.lock diff --git a/dev-support/release-vm/puppet/production/.librarian/puppet/config b/dev-support/release-vm/puppet/production/.librarian/puppet/config new file mode 100644 index 000000000000..738f292ed132 --- /dev/null +++ b/dev-support/release-vm/puppet/production/.librarian/puppet/config @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +LIBRARIAN_PUPPET_DESTRUCTIVE: 'false' +LIBRARIAN_PUPPET_USE_V1_API: '1' +LIBRARIAN_PUPPET_TMP: "/tmp/librarian_puppet/tmp" diff --git a/dev-support/release-vm/puppet/production/Puppetfile b/dev-support/release-vm/puppet/production/Puppetfile new file mode 100644 index 000000000000..3d5d5e44640e --- /dev/null +++ b/dev-support/release-vm/puppet/production/Puppetfile @@ -0,0 +1,27 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
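+#
+# (note) Every module below is pinned, either to a released version or, for
+# puppet-packages, to an exact git commit, so the release VM provisions
+# reproducibly.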
+# + +forge "https://forgeapi.puppetlabs.com" + +mod 'puppet-packages', + :git => "https://github.com/greenaar/puppet-packages.git", + :ref => '8d6b8a85eea931e4cd045884d5786c1c1ff0df4c' +mod 'puppetlabs-docker', '3.10.1' +mod 'puppetlabs-stdlib', '5.2.0' +mod 'puppetlabs-vcsrepo', '3.1.0' +mod 'saz-ssh', '6.2.0' diff --git a/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml b/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml new file mode 100644 index 000000000000..44a66262e31a --- /dev/null +++ b/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +classes: + - docker + - packages::manage + - ssh + +packages::latest: + - curl + - git + - gnupg + - gnupg-agent + +ssh::server_options: + StreamLocalBindUnlink: 'yes' diff --git a/dev-support/release-vm/puppet/production/environment.conf b/dev-support/release-vm/puppet/production/environment.conf new file mode 100644 index 000000000000..c6deb8dd9886 --- /dev/null +++ b/dev-support/release-vm/puppet/production/environment.conf @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +modulepath = modules:site:$basemodulepath +manifest = manifests diff --git a/dev-support/release-vm/puppet/production/hiera.yaml b/dev-support/release-vm/puppet/production/hiera.yaml new file mode 100644 index 000000000000..a8bb7c1c965f --- /dev/null +++ b/dev-support/release-vm/puppet/production/hiera.yaml @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +version: 5 + +hierarchy: + - name: "Per-node data" + path: "nodes/%{facts.hostname}.yaml" diff --git a/dev-support/release-vm/puppet/production/manifests/default.pp b/dev-support/release-vm/puppet/production/manifests/default.pp new file mode 100644 index 000000000000..e429d5af4bed --- /dev/null +++ b/dev-support/release-vm/puppet/production/manifests/default.pp @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +lookup('classes', Array[String], 'unique').include + +node rmvm { + $user = 'vagrant' + + # include the default `vagrant` user in the `docker` group + user { $user: + groups => ['docker'], + require => Package['docker'], + } + + # allow managing git repos in puppet + vcsrepo { "/home/${user}/repos/hbase": + ensure => latest, + branch => 'master', + group => $user, + owner => $user, + keep_local_changes => true, + provider => git, + remote => 'origin', + source => { + 'origin' => 'https://github.com/apache/hbase.git', + }, + depth => 1, + } +} From 3d34623163a71db55757347ef8e1e57a48ac0e56 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Fri, 15 Jan 2021 13:45:08 +0800 Subject: [PATCH 331/769] HBASE-25439 Add BYTE unit in PrettyPrinter.Unit (addendum) (#2841) Signed-off-by: stack --- .../hadoop/hbase/client/TableDescriptorBuilder.java | 1 + .../hadoop/hbase/client/TestTableDescriptorBuilder.java | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index fd466654ea4e..d98386817148 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -251,6 +251,7 @@ public class TableDescriptorBuilder { public static PrettyPrinter.Unit getUnit(String key) { switch (key) { case MAX_FILESIZE: + case MEMSTORE_FLUSHSIZE: return PrettyPrinter.Unit.BYTE; default: return PrettyPrinter.Unit.NONE; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 425d59022ab0..43824afe8107 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -348,11 +348,15 @@ public void testStringCustomizedValues() throws HBaseException { "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", htd.toStringCustomizedValues()); - htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528").build(); + htd = TableDescriptorBuilder.newBuilder(htd) + .setMaxFileSize("10737942528") + .setMemStoreFlushSize("256MB") + .build(); assertEquals( "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " - + "MAX_FILESIZE => '10737942528 B (10GB 512KB)'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", + + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " + + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", htd.toStringCustomizedValues()); } From 9b670a489b8821573e3978762d7070e2f59801eb Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Fri, 15 Jan 2021 14:00:50 +0800 Subject: [PATCH 332/769] HBASE-25431 MAX_FILESIZE and MEMSTORE_FLUSHSIZE should not be set negative number (#2803) Signed-off-by: stack --- .../hbase/util/TableDescriptorChecker.java | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index c69d38a8ec25..30c07b325a17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; @@ -82,10 +83,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check max file size long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit - long maxFileSize = td.getMaxFileSize(); - if (maxFileSize < 0) { - maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit); - } + // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in + // hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check + long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null ? 
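+      // Fall back to the cluster default only when the table did not set
+      // MAX_FILESIZE at all; an explicitly negative value is parsed as-is and
+      // rejected by the lower-limit check below instead of being silently
+      // replaced, as the old td.getMaxFileSize() < 0 path did.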
+ conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : + Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) { String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + @@ -96,10 +98,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check flush size long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit - long flushSize = td.getMemStoreFlushSize(); - if (flushSize < 0) { - flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit); - } + // if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in + // hbase-site.xml, use flushSizeLowerLimit instead to skip this check + long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null ? + conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : + Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) { String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + flushSize + From 1474ebcc6988e98abbd216863acb9388152c53f6 Mon Sep 17 00:00:00 2001 From: Anjan Das Date: Fri, 15 Jan 2021 12:18:12 +0530 Subject: [PATCH 333/769] HBASE-25475: Improve UT added as part of HBASE-25445 in TestSplitWALManager (#2855) Signed-off-by: Wellington Chevreuil --- .../hbase/master/TestSplitWALManager.java | 101 ++++++------------ 1 file changed, 32 insertions(+), 69 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index 10eda749891d..40adbeaf9fe2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -31,14 +31,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure; -import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -98,58 +90,6 @@ public void teardown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void testWALArchiveWithDifferentWalAndRootFS() throws Exception{ - HBaseTestingUtility test_util_2 = new HBaseTestingUtility(); - Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); - test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); - CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir); - test_util_2.startMiniCluster(3); - HMaster master2 = test_util_2.getHBaseCluster().getMaster(); - LOG.info("The Master FS is pointing to: " + master2.getMasterFileSystem() - .getFileSystem().getUri()); - 
LOG.info("The WAL FS is pointing to: " + master2.getMasterFileSystem() - .getWALFileSystem().getUri()); - Table table = test_util_2.createTable(TABLE_NAME, FAMILY); - test_util_2.waitTableAvailable(TABLE_NAME); - Admin admin = test_util_2.getAdmin(); - MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster() - .getMasterProcedureExecutor().getEnvironment(); - final ProcedureExecutor executor = test_util_2.getMiniHBaseCluster() - .getMaster().getMasterProcedureExecutor(); - List regionInfos = admin.getRegions(TABLE_NAME); - SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure( - env, regionInfos.get(0), Bytes.toBytes("row5")); - // Populate some rows in the table - LOG.info("Beginning put data to the table: " + TABLE_NAME.toString()); - int rowCount = 5; - for (int i = 0; i < rowCount; i++) { - byte[] row = Bytes.toBytes("row" + i); - Put put = new Put(row); - put.addColumn(FAMILY, FAMILY, FAMILY); - table.put(put); - } - executor.submitProcedure(splitProcedure); - LOG.info("Submitted SplitProcedure."); - test_util_2.waitFor(30000, () -> executor.getProcedures().stream() - .filter(p -> p instanceof TransitRegionStateProcedure) - .map(p -> (TransitRegionStateProcedure) p) - .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); - test_util_2.getMiniHBaseCluster().killRegionServer( - test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName()); - test_util_2.getMiniHBaseCluster().startRegionServer(); - test_util_2.waitUntilNoRegionsInTransition(); - Scan scan = new Scan(); - ResultScanner results = table.getScanner(scan); - int scanRowCount = 0; - while (results.next() != null) { - scanRowCount++; - } - Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount + - " were expected.", rowCount, scanRowCount); - test_util_2.shutdownMiniCluster(); - } - @Test public void testAcquireAndRelease() throws Exception { List testProcedures = new ArrayList<>(); @@ -272,16 +212,22 @@ public void testGetWALsToSplit() throws Exception { Assert.assertEquals(0, metaWals.size()); } - @Test - public void testSplitLogs() throws Exception { - TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE); + private void splitLogsTestHelper(HBaseTestingUtility testUtil) throws Exception { + HMaster hmaster = testUtil.getHBaseCluster().getMaster(); + SplitWALManager splitWALManager = hmaster.getSplitWALManager(); + LOG.info("The Master FS is pointing to: " + hmaster.getMasterFileSystem() + .getFileSystem().getUri()); + LOG.info("The WAL FS is pointing to: " + hmaster.getMasterFileSystem() + .getWALFileSystem().getUri()); + + testUtil.createTable(TABLE_NAME, FAMILY, testUtil.KEYS_FOR_HBA_CREATE_TABLE); // load table - TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY); - ProcedureExecutor masterPE = master.getMasterProcedureExecutor(); - ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta(); - ServerName testServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream() - .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny() - .get(); + testUtil.loadTable(testUtil.getConnection().getTable(TABLE_NAME), FAMILY); + ProcedureExecutor masterPE = hmaster.getMasterProcedureExecutor(); + ServerName metaServer = testUtil.getHBaseCluster().getServerHoldingMeta(); + ServerName testServer = testUtil.getHBaseCluster().getRegionServerThreads().stream() + .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny() + .get(); List procedures = 
splitWALManager.splitWALs(testServer, false); Assert.assertEquals(1, procedures.size()); ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0)); @@ -294,6 +240,23 @@ public void testSplitLogs() throws Exception { Assert.assertEquals(1, splitWALManager.getWALsToSplit(metaServer, false).size()); } + @Test + public void testSplitLogs() throws Exception { + splitLogsTestHelper(TEST_UTIL); + } + + @Test + public void testSplitLogsWithDifferentWalAndRootFS() throws Exception{ + HBaseTestingUtility testUtil2 = new HBaseTestingUtility(); + testUtil2.getConfiguration().setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 1); + Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); + testUtil2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); + CommonFSUtils.setWALRootDir(testUtil2.getConfiguration(), dir); + testUtil2.startMiniCluster(3); + splitLogsTestHelper(testUtil2); + testUtil2.shutdownMiniCluster(); + } + @Test public void testWorkerReloadWhenMasterRestart() throws Exception { List testProcedures = new ArrayList<>(); From 643548f5f5d6dfd44beb95185eba682979473995 Mon Sep 17 00:00:00 2001 From: Christine Feng Date: Sat, 16 Jan 2021 00:03:13 -0800 Subject: [PATCH 334/769] HBASE-25478 - Implement retries when enabling tables in TestRegionReplicaReplicationEndpoint (#2866) Signed-off-by: stack Signed-off-by: Viraj Jasani --- .../TestRegionReplicaReplicationEndpoint.java | 47 ++++++++++++++----- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java index 54560582cd35..62989d3bf747 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.List; import java.util.UUID; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell.Type; @@ -54,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -74,6 +76,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Uninterruptibles; /** * Tests RegionReplicaReplicationEndpoint class by setting up region replicas and verifying @@ -145,7 +148,7 @@ public void testRegionReplicaReplicationPeerIsCreated() throws IOException { .createTableDescriptor(TableName.valueOf("testReplicationPeerIsCreated_no_region_replicas"), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); try { peerConfig = admin.getReplicationPeerConfig(peerId); fail("Should throw ReplicationException, because replication peer id=" + 
peerId @@ -157,7 +160,7 @@ public void testRegionReplicaReplicationPeerIsCreated() throws IOException { htd = HTU.createModifyableTableDescriptor(TableName.valueOf("testReplicationPeerIsCreated"), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED).setRegionReplication(2).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); // assert peer configuration is correct peerConfig = admin.getReplicationPeerConfig(peerId); @@ -193,7 +196,7 @@ public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exc TableName.valueOf("testRegionReplicaReplicationPeerIsCreatedForModifyTable"), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); // assert that replication peer is not created yet try { @@ -207,7 +210,7 @@ public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exc HTU.getAdmin().disableTable(htd.getTableName()); htd = TableDescriptorBuilder.newBuilder(htd).setRegionReplication(2).build(); HTU.getAdmin().modifyTable(htd); - HTU.getAdmin().enableTable(htd.getTableName()); + createOrEnableTableWithRetries(htd, false); // assert peer configuration is correct peerConfig = admin.getReplicationPeerConfig(peerId); @@ -229,7 +232,7 @@ public void testRegionReplicaReplication(int regionReplication) throws Exception ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) .setRegionReplication(regionReplication).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); TableName tableNameNoReplicas = TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS"); HTU.deleteTableIfAny(tableNameNoReplicas); @@ -318,7 +321,7 @@ public void testRegionReplicaWithoutMemstoreReplication() throws Exception { int regionReplication = 3; TableDescriptor htd = HTU.createModifyableTableDescriptor(name.getMethodName()) .setRegionReplication(regionReplication).setRegionMemStoreReplication(false).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); final TableName tableName = htd.getTableName(); Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); Table table = connection.getTable(tableName); @@ -352,7 +355,7 @@ public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception int regionReplication = 3; TableDescriptor htd = HTU.createModifyableTableDescriptor(name.getMethodName()) .setRegionReplication(regionReplication).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); final TableName tableName = htd.getTableName(); Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); @@ -404,7 +407,7 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa final TableName tableName = htd.getTableName(); HTU.deleteTableIfAny(tableName); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); TableName toBeDisabledTable = TableName.valueOf( dropTable ? "droppedTable" : (disableReplication ? 
"disableReplication" : "disabledTable")); HTU.deleteTableIfAny(toBeDisabledTable); @@ -413,7 +416,7 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) .setRegionReplication(regionReplication).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); // both tables are created, now pause replication HTU.getAdmin().disableReplicationPeer(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_PEER); @@ -443,7 +446,7 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa htd = TableDescriptorBuilder.newBuilder(htd).setRegionReplication(regionReplication - 2).build(); HTU.getAdmin().modifyTable(htd); - HTU.getAdmin().enableTable(toBeDisabledTable); + createOrEnableTableWithRetries(htd, false); } HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(0); @@ -467,7 +470,7 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table htd = TableDescriptorBuilder.newBuilder(htd).setRegionReplication(regionReplication).build(); HTU.getAdmin().modifyTable(htd); - HTU.getAdmin().enableTable(toBeDisabledTable); + createOrEnableTableWithRetries(htd, false); } try { @@ -487,4 +490,26 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa connection.close(); } } + + private void createOrEnableTableWithRetries(TableDescriptor htd, boolean createTableOperation) { + // Helper function to run create/enable table operations with a retry feature + boolean continueToRetry = true; + int tries = 0; + while (continueToRetry && tries < 50) { + try { + continueToRetry = false; + if (createTableOperation) { + HTU.getAdmin().createTable(htd); + } else { + HTU.getAdmin().enableTable(htd.getTableName()); + } + } catch (IOException e) { + if (e.getCause() instanceof ReplicationException) { + continueToRetry = true; + tries++; + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + } + } + } + } } From d5debe15b5a20c559fabbdb82ce2e1598c8f3030 Mon Sep 17 00:00:00 2001 From: Anjan Das Date: Sun, 17 Jan 2021 16:31:07 +0530 Subject: [PATCH 335/769] HBASE-25475 : Unset zk based wal splitting explicitly in tests (ADDENDUM) (#2891) Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/master/TestSplitWALManager.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index 40adbeaf9fe2..e1f318869bab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -248,6 +248,7 @@ public void testSplitLogs() throws Exception { @Test public void testSplitLogsWithDifferentWalAndRootFS() throws Exception{ HBaseTestingUtility testUtil2 = new HBaseTestingUtility(); + testUtil2.getConfiguration().setBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false); testUtil2.getConfiguration().setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 1); Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); testUtil2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); From 9e9bec24d2c6e527aaeb0ffe5510e5edb12de40d Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Mon, 18 Jan 2021 18:57:02 +0800 
Subject: [PATCH 336/769] =?UTF-8?q?HBASE-25497=20move=5Fnamespaces=5Frsgro?= =?UTF-8?q?up=20should=20change=20hbase.rsgroup.name=20=E2=80=A6=20(#2875)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * HBASE-25497 move_namespaces_rsgroup should change hbase.rsgroup.name config in NamespaceDescriptor Signed-off-by: Zheng Wang --- .../main/ruby/shell/commands/move_namespaces_rsgroup.rb | 4 ++++ hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/hbase-shell/src/main/ruby/shell/commands/move_namespaces_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/move_namespaces_rsgroup.rb index 7878eb9317ca..60b0bff62c28 100644 --- a/hbase-shell/src/main/ruby/shell/commands/move_namespaces_rsgroup.rb +++ b/hbase-shell/src/main/ruby/shell/commands/move_namespaces_rsgroup.rb @@ -31,6 +31,10 @@ def help def command(dest, namespaces) rsgroup_admin.move_namespaces(dest, namespaces) + namespaces.each do |ns| + arg = {'METHOD' => 'set', 'hbase.rsgroup.name' => dest} + admin.alter_namespace(ns, arg) + end end end end diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb index eec92b30ba0b..f93d36422491 100644 --- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb +++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb @@ -114,8 +114,16 @@ def remove_rsgroup(group_name) assert_not_nil(group) assert_true(@admin.listTablesInRSGroup(group_name).contains(org.apache.hadoop.hbase.TableName.valueOf(ns_table_name))) + ns_table_name2 = 'test_namespace:test_ns_table2' + @shell.command(:create, ns_table_name2, 'f') + + assert_true(@admin.listTablesInRSGroup(group_name).contains(org.apache.hadoop.hbase.TableName.valueOf(ns_table_name2))) + assert_equal(2, @admin.listTablesInRSGroup(group_name).count) + @shell.command(:disable, ns_table_name) @shell.command(:drop, ns_table_name) + @shell.command(:disable, ns_table_name2) + @shell.command(:drop, ns_table_name2) @shell.command(:drop_namespace, namespace_name) remove_rsgroup(group_name) end From 6c3861f65d82e6c123c05eb9fd54f3207d0ba222 Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Tue, 19 Jan 2021 17:21:01 +0800 Subject: [PATCH 337/769] HBASE-25496 add get_namespace_rsgroup command (#2874) Signed-off-by: Viraj Jasani --- hbase-shell/src/main/ruby/hbase/admin.rb | 10 +++++ hbase-shell/src/main/ruby/shell.rb | 1 + .../shell/commands/get_namespace_rsgroup.rb | 41 +++++++++++++++++++ 3 files changed, 52 insertions(+) create mode 100644 hbase-shell/src/main/ruby/shell/commands/get_namespace_rsgroup.rb diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index d874d6337b84..4e3b0de16a04 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1447,6 +1447,16 @@ def alter_namespace(namespace_name, *args) @admin.modifyNamespace(nsb.build) end + #---------------------------------------------------------------------------------------------- + # Get namespace's rsgroup + def get_namespace_rsgroup(namespace_name) + # Fail if namespace name is not a string + raise(ArgumentError, 'Namespace name must be of type String') unless namespace_name.is_a?(String) + nsd = @admin.getNamespaceDescriptor(namespace_name) + raise(ArgumentError, 'Namespace does not exist') unless nsd + nsd.getConfigurationValue("hbase.rsgroup.name") + end + 
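For reference, the lookup this helper performs maps onto the Java client API roughly as follows (a sketch only; the `Admin` handle is assumed to come from an existing `Connection`):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;

public class NamespaceRsgroupSketch {
  // Same lookup as `get_namespace_rsgroup 'ns'`: read the rsgroup key off the
  // namespace descriptor; returns null when no group was ever assigned.
  static String rsgroupOf(Admin admin, String namespace) throws IOException {
    NamespaceDescriptor nsd = admin.getNamespaceDescriptor(namespace);
    return nsd.getConfigurationValue("hbase.rsgroup.name");
  }
}
```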
#---------------------------------------------------------------------------------------------- # Drops a table def drop_namespace(namespace_name) diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index ba069146ef31..549e31d1dd65 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -628,5 +628,6 @@ def self.exception_handler(hide_traceback) rename_rsgroup alter_rsgroup_config show_rsgroup_config + get_namespace_rsgroup ] ) diff --git a/hbase-shell/src/main/ruby/shell/commands/get_namespace_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_namespace_rsgroup.rb new file mode 100644 index 000000000000..a4991d16943b --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_namespace_rsgroup.rb @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetNamespaceRsgroup < Command + def help + <<-EOF +Get the group name the given NameSpace is a member of. + +Example: + + hbase> get_namespace_rsgroup 'namespace_name' + +EOF + end + + def command(namespace_name) + group_name = admin.get_namespace_rsgroup(namespace_name) + unless group_name.nil? + formatter.row([group_name]) + end + formatter.footer(1) + end + end + end +end \ No newline at end of file From bc4f5c2709a71c985cd352da6374565e809e75e0 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Tue, 19 Jan 2021 23:53:51 +0800 Subject: [PATCH 338/769] HBASE-25513 When the table is turned on normalize, the first region may not be merged even the size is 0 (#2887) Signed-off-by: Nick Dimiduk --- .../normalizer/SimpleRegionNormalizer.java | 4 +++ .../TestSimpleRegionNormalizer.java | 32 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 6d7387b7f11b..08e529cd01a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -364,6 +364,10 @@ private List computeMergeNormalizationPlans(final NormalizeCo if (rangeMembers.isEmpty() // when there are no range members, seed the range with whatever // we have. this way we're prepared in case the next region is // 0-size. + || (rangeMembers.size() == 1 && sumRangeMembersSizeMb == 0) // when there is only one + // region and the size is 0, + // seed the range with + // whatever we have. || regionSizeMb == 0 // always add an empty region to the current range. 
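+          // Worked example: with per-region sizes [0, 10, 1, 0, 9, 0, 10, 0] and
+          // an average of 3.75 MB, the ranges come out as [0,10] [1,0] [9,0]
+          // [10,0]; the new size()==1 clause lets the 10 MB region join a lone
+          // 0-size seed instead of leaving the first empty region unmerged
+          // (see testMergeEmptyRegions2 below).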
|| (regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb)) { // add the current region // to the range when diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index 33b32972542e..70f5a87ac42a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -448,6 +448,38 @@ public void testMergeEmptyRegions1() { .build())); } + @Test + public void testMergeEmptyRegions2() { + conf.setBoolean(SPLIT_ENABLED_KEY, false); + conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); + final TableName tableName = name.getTableName(); + final List regionInfos = createRegionInfos(tableName, 8); + final Map regionSizes = + createRegionSizesMap(regionInfos, 0, 10, 1, 0, 9, 0, 10, 0); + setupMocksForNormalizer(regionSizes, regionInfos); + + assertFalse(normalizer.isSplitEnabled()); + assertEquals(0, normalizer.getMergeMinRegionSizeMb()); + List plans = normalizer.computePlansForTable(tableName); + assertThat(plans, contains( + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 10) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(2), 1) + .addTarget(regionInfos.get(3), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(4), 9) + .addTarget(regionInfos.get(5), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(6), 10) + .addTarget(regionInfos.get(7), 0) + .build())); + } + @Test public void testSplitAndMultiMerge() { conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); From 871eb09b3d051dfbc864260d2d26ab4dc4018ec0 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Wed, 20 Jan 2021 03:09:50 +0800 Subject: [PATCH 339/769] HBASE-25482 Improve SimpleRegionNormalizer#getAverageRegionSizeMb (#2858) Signed-off-by: Nick Dimiduk --- .../normalizer/SimpleRegionNormalizer.java | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 08e529cd01a4..1675e049d77d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -257,16 +257,13 @@ private double getAverageRegionSizeMb(final List tableRegions) { throw new IllegalStateException( "Cannot calculate average size of a table without any regions."); } - final int regionCount = tableRegions.size(); - final long totalSizeMb = tableRegions.stream() - .mapToLong(this::getRegionSizeMB) - .sum(); TableName table = tableRegions.get(0).getTable(); int targetRegionCount = -1; long targetRegionSize = -1; + double avgRegionSize; try { TableDescriptor tableDescriptor = masterServices.getTableDescriptors().get(table); - if (tableDescriptor != null && LOG.isDebugEnabled()) { + if (tableDescriptor != null) { targetRegionCount = tableDescriptor.getNormalizerTargetRegionCount(); targetRegionSize = tableDescriptor.getNormalizerTargetRegionSize(); LOG.debug("Table {} configured with target region count {}, target region size {}", table, @@ -276,18 
+273,22 @@ private double getAverageRegionSizeMb(final List tableRegions) { LOG.warn("TableDescriptor for {} unavailable, table-level target region count and size" + " configurations cannot be considered.", table, e); } - - double avgRegionSize; if (targetRegionSize > 0) { avgRegionSize = targetRegionSize; - } else if (targetRegionCount > 0) { - avgRegionSize = totalSizeMb / (double) targetRegionCount; } else { - avgRegionSize = totalSizeMb / (double) regionCount; + final int regionCount = tableRegions.size(); + final long totalSizeMb = tableRegions.stream() + .mapToLong(this::getRegionSizeMB) + .sum(); + if (targetRegionCount > 0) { + avgRegionSize = totalSizeMb / (double) targetRegionCount; + } else { + avgRegionSize = totalSizeMb / (double) regionCount; + } + LOG.debug("Table {}, total aggregated regions size: {} and average region size {}", table, + totalSizeMb, avgRegionSize); } - LOG.debug("Table {}, total aggregated regions size: {} and average region size {}", table, - totalSizeMb, avgRegionSize); return avgRegionSize; } From a37e72799015ce2a0d6ce98566149a17b31c99ea Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 20 Jan 2021 16:10:36 +0800 Subject: [PATCH 340/769] =?UTF-8?q?HBASE-25509=20ChoreService.cancelChore?= =?UTF-8?q?=20will=20not=20call=20ScheduledChore.cle=E2=80=A6=20(#2890)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/ChoreService.java | 135 ++- .../apache/hadoop/hbase/ScheduledChore.java | 103 +- .../apache/hadoop/hbase/TestChoreService.java | 1076 ++++++++--------- .../apache/hadoop/hbase/master/HMaster.java | 40 +- .../hbase/master/RegionsRecoveryChore.java | 22 - .../master/RegionsRecoveryConfigManager.java | 45 +- .../hadoop/hbase/master/ServerManager.java | 2 +- .../hadoop/hbase/master/SplitLogManager.java | 2 +- .../hadoop/hbase/quotas/QuotaCache.java | 2 +- .../quotas/RegionServerSpaceQuotaManager.java | 4 +- .../hbase/regionserver/HRegionServer.java | 24 +- .../hbase/regionserver/HeapMemoryManager.java | 2 +- .../TestRegionsRecoveryConfigManager.java | 58 +- .../master/janitor/TestCatalogJanitor.java | 2 +- 14 files changed, 718 insertions(+), 799 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java index 39c3ccc69199..5bd67ad02eec 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase; +import com.google.errorprone.annotations.RestrictedApi; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; @@ -26,8 +27,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.hadoop.hbase.ScheduledChore.ChoreServicer; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +51,7 @@ * Calling this method ensures that all scheduled chores are cancelled and cleaned up properly. 
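+ * <p/>
+ * A minimal usage sketch (assuming the reworked lifecycle in this patch, where
+ * cancellation flows through the chore itself):
+ * <pre>
+ *   ChoreService service = new ChoreService("myService");
+ *   service.scheduleChore(chore); // the chore remembers this service
+ *   chore.cancel();               // delegates to service.cancelChore(chore)
+ *   service.shutdown();           // cancels anything still scheduled
+ * </pre>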
*/ @InterfaceAudience.Public -public class ChoreService implements ChoreServicer { +public class ChoreService { private static final Logger LOG = LoggerFactory.getLogger(ChoreService.class); /** @@ -141,28 +140,39 @@ public ChoreService(final String coreThreadPoolPrefix, int corePoolSize, boolean * @return true when the chore was successfully scheduled. false when the scheduling failed * (typically occurs when a chore is scheduled during shutdown of service) */ - public synchronized boolean scheduleChore(ScheduledChore chore) { + public boolean scheduleChore(ScheduledChore chore) { if (chore == null) { return false; } - - try { - if (chore.getPeriod() <= 0) { - LOG.info("Chore {} is disabled because its period is not positive.", chore); - return false; - } - LOG.info("Chore {} is enabled.", chore); - chore.setChoreServicer(this); - ScheduledFuture future = - scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), chore.getPeriod(), - chore.getTimeUnit()); - scheduledChores.put(chore, future); - return true; - } catch (Exception exception) { - if (LOG.isInfoEnabled()) { - LOG.info("Could not successfully schedule chore: " + chore.getName()); + // always lock chore first to prevent dead lock + synchronized (chore) { + synchronized (this) { + try { + // Chores should only ever be scheduled with a single ChoreService. If the choreService + // is changing, cancel any existing schedules of this chore. + if (chore.getChoreService() == this) { + LOG.warn("Chore {} has already been scheduled with us", chore); + return false; + } + if (chore.getPeriod() <= 0) { + LOG.info("Chore {} is disabled because its period is not positive.", chore); + return false; + } + LOG.info("Chore {} is enabled.", chore); + if (chore.getChoreService() != null) { + LOG.info("Cancel chore {} from its previous service", chore); + chore.getChoreService().cancelChore(chore); + } + chore.setChoreService(this); + ScheduledFuture future = scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), + chore.getPeriod(), chore.getTimeUnit()); + scheduledChores.put(chore, future); + return true; + } catch (Exception e) { + LOG.error("Could not successfully schedule chore: {}", chore.getName(), e); + return false; + } } - return false; } } @@ -175,19 +185,35 @@ private void rescheduleChore(ScheduledChore chore) { ScheduledFuture future = scheduledChores.get(chore); future.cancel(false); } - scheduleChore(chore); + ScheduledFuture future = scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), + chore.getPeriod(), chore.getTimeUnit()); + scheduledChores.put(chore, future); } - @InterfaceAudience.Private - @Override - public synchronized void cancelChore(ScheduledChore chore) { + /** + * Cancel any ongoing schedules that this chore has with the implementer of this interface. + *
+   * <p/>
+   * Call {@link ScheduledChore#cancel()} to cancel a {@link ScheduledChore}; in the
+   * {@link ScheduledChore#cancel()} method we will call this method to remove the
+   * {@link ScheduledChore} from this {@link ChoreService}.
+   */
+  @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "",
+      allowedOnPath = ".*/org/apache/hadoop/hbase/(ScheduledChore|ChoreService).java")
+  synchronized void cancelChore(ScheduledChore chore) {
     cancelChore(chore, true);
   }
-  @InterfaceAudience.Private
-  @Override
-  public synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning) {
-    if (chore != null && scheduledChores.containsKey(chore)) {
+  /**
+   * Cancel any ongoing schedules that this chore has with the implementer of this interface.
+   *
+   * Call {@link ScheduledChore#cancel(boolean)} to cancel a {@link ScheduledChore}; in the
+   * {@link ScheduledChore#cancel(boolean)} method we will call this method to remove the
+   * {@link ScheduledChore} from this {@link ChoreService}.
+   */
+  @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "",
+      allowedOnPath = ".*/org/apache/hadoop/hbase/(ScheduledChore|ChoreService).java")
+  synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning) {
+    if (scheduledChores.containsKey(chore)) {
       ScheduledFuture<?> future = scheduledChores.get(chore);
       future.cancel(mayInterruptIfRunning);
       scheduledChores.remove(chore);
@@ -201,21 +227,24 @@ public synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptI
     }
   }
 
+  /**
+   * @return true when the chore is scheduled with the implementer of this interface
+   */
   @InterfaceAudience.Private
-  @Override
   public synchronized boolean isChoreScheduled(ScheduledChore chore) {
     return chore != null && scheduledChores.containsKey(chore)
       && !scheduledChores.get(chore).isDone();
   }
 
-  @InterfaceAudience.Private
-  @Override
-  public synchronized boolean triggerNow(ScheduledChore chore) {
-    if (chore != null) {
-      rescheduleChore(chore);
-      return true;
-    }
-    return false;
+  /**
+   * This method tries to execute the chore immediately. If the chore is executing at the time of
+   * this call, the chore will begin another execution as soon as the current execution finishes.
+   */
+  @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "",
+      allowedOnPath = ".*/org/apache/hadoop/hbase/ScheduledChore.java")
+  synchronized void triggerNow(ScheduledChore chore) {
+    assert chore.getChoreService() == this;
+    rescheduleChore(chore);
   }
 
   /**
@@ -295,10 +324,20 @@ private synchronized void requestCorePoolDecrease() {
     }
   }
 
-  @InterfaceAudience.Private
-  @Override
-  public synchronized void onChoreMissedStartTime(ScheduledChore chore) {
-    if (chore == null || !scheduledChores.containsKey(chore)) return;
+  /**
+   * A callback that tells the implementer of this interface that one of the scheduled chores is
+   * missing its start time. The implication of a chore missing its start time is that the
+   * service's current means of scheduling may not be sufficient to handle the number of ongoing
+   * chores (the other explanation is that the chore's execution time is greater than its
+   * scheduled period). The service should try to increase its concurrency when this callback is
+   * received.
+   * @param chore The chore that missed its start time
+   */
+  @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "",
+      allowedOnPath = ".*/org/apache/hadoop/hbase/ScheduledChore.java")
+  synchronized void onChoreMissedStartTime(ScheduledChore chore) {
+    if (!scheduledChores.containsKey(chore)) {
+      return;
+    }
 
     // If the chore has not caused an increase in the size of the core thread pool then request an
     // increase. This allows each chore missing its start time to increase the core pool size by
@@ -319,13 +358,17 @@ public synchronized void onChoreMissedStartTime(ScheduledChore chore) {
    * shutdown the service. Any chores that are scheduled for execution will be cancelled. Any chores
    * in the middle of execution will be interrupted and shutdown. This service will be unusable
    * after this method has been called (i.e. future scheduling attempts will fail).
+   * <p/>
+   * Notice that this will only clean the chore from this ChoreService, but you could still
+   * schedule the chore with another ChoreService.
    */
   public synchronized void shutdown() {
-    scheduler.shutdownNow();
-    if (LOG.isInfoEnabled()) {
-      LOG.info("Chore service for: " + coreThreadPoolPrefix + " had " + scheduledChores.keySet()
-        + " on shutdown");
+    if (isShutdown()) {
+      return;
     }
+    scheduler.shutdownNow();
+    LOG.info("Chore service for: {} had {} on shutdown", coreThreadPoolPrefix,
+      scheduledChores.keySet());
     cancelAllChores(true);
     scheduledChores.clear();
     choresMissingStartTime.clear();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
index 1fb5b7e9e340..6155bbdeb3b0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -33,7 +34,7 @@
  * execute within the defined period. It is bad practice to define a ScheduledChore whose execution
  * time exceeds its period since it will try to hog one of the threads in the {@link ChoreService}'s
  * thread pool.
- * <p>
+ * <p/>
    * Don't subclass ScheduledChore if the task relies on being woken up for something to do, such as * an entry being added to a queue, etc. */ @@ -60,7 +61,7 @@ public abstract class ScheduledChore implements Runnable { * Interface to the ChoreService that this ScheduledChore is scheduled with. null if the chore is * not scheduled. */ - private ChoreServicer choreServicer; + private ChoreService choreService; /** * Variables that encapsulate the meaningful state information @@ -77,39 +78,6 @@ public abstract class ScheduledChore implements Runnable { */ private final Stoppable stopper; - interface ChoreServicer { - /** - * Cancel any ongoing schedules that this chore has with the implementer of this interface. - */ - public void cancelChore(ScheduledChore chore); - public void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning); - - /** - * @return true when the chore is scheduled with the implementer of this interface - */ - public boolean isChoreScheduled(ScheduledChore chore); - - /** - * This method tries to execute the chore immediately. If the chore is executing at the time of - * this call, the chore will begin another execution as soon as the current execution finishes - *
<p>
    - * If the chore is not scheduled with a ChoreService, this call will fail. - * @return false when the chore could not be triggered immediately - */ - public boolean triggerNow(ScheduledChore chore); - - /** - * A callback that tells the implementer of this interface that one of the scheduled chores is - * missing its start time. The implication of a chore missing its start time is that the - * service's current means of scheduling may not be sufficient to handle the number of ongoing - * chores (the other explanation is that the chore's execution time is greater than its - * scheduled period). The service should try to increase its concurrency when this callback is - * received. - * @param chore The chore that missed its start time - */ - public void onChoreMissedStartTime(ScheduledChore chore); - } - /** * This constructor is for test only. It allows us to create an object and to call chore() on it. */ @@ -168,8 +136,8 @@ public void run() { onChoreMissedStartTime(); LOG.info("Chore: {} missed its start time", getName()); } else if (stopper.isStopped() || !isScheduled()) { - cancel(false); - cleanup(); + // call shutdown here to cleanup the ScheduledChore. + shutdown(false); LOG.info("Chore: {} was stopped", getName()); } else { try { @@ -193,7 +161,6 @@ public void run() { LOG.error("Caught error", t); if (this.stopper.isStopped()) { cancel(false); - cleanup(); } } } @@ -214,7 +181,9 @@ private synchronized void updateTimeTrackingBeforeRun() { * pool threads */ private synchronized void onChoreMissedStartTime() { - if (choreServicer != null) choreServicer.onChoreMissedStartTime(this); + if (choreService != null) { + choreService.onChoreMissedStartTime(this); + } } /** @@ -253,20 +222,17 @@ private synchronized boolean isValidTime(final long time) { * @return false when the Chore is not currently scheduled with a ChoreService */ public synchronized boolean triggerNow() { - if (choreServicer != null) { - return choreServicer.triggerNow(this); - } else { + if (choreService == null) { return false; } + choreService.triggerNow(this); + return true; } - synchronized void setChoreServicer(ChoreServicer service) { - // Chores should only ever be scheduled with a single ChoreService. If the choreServicer - // is changing, cancel any existing schedules of this chore. 
- if (choreServicer != null && choreServicer != service) { - choreServicer.cancelChore(this, false); - } - choreServicer = service; + @RestrictedApi(explanation = "Should only be called in ChoreService", link = "", + allowedOnPath = ".*/org/apache/hadoop/hbase/ChoreService.java") + synchronized void setChoreService(ChoreService service) { + choreService = service; timeOfThisRun = -1; } @@ -275,9 +241,10 @@ public synchronized void cancel() { } public synchronized void cancel(boolean mayInterruptIfRunning) { - if (isScheduled()) choreServicer.cancelChore(this, mayInterruptIfRunning); - - choreServicer = null; + if (isScheduled()) { + choreService.cancelChore(this, mayInterruptIfRunning); + } + choreService = null; } public String getName() { @@ -310,17 +277,14 @@ public synchronized boolean isInitialChoreComplete() { return initialChoreComplete; } - @InterfaceAudience.Private - synchronized ChoreServicer getChoreServicer() { - return choreServicer; + synchronized ChoreService getChoreService() { + return choreService; } - @InterfaceAudience.Private synchronized long getTimeOfLastRun() { return timeOfLastRun; } - @InterfaceAudience.Private synchronized long getTimeOfThisRun() { return timeOfThisRun; } @@ -329,10 +293,12 @@ synchronized long getTimeOfThisRun() { * @return true when this Chore is scheduled with a ChoreService */ public synchronized boolean isScheduled() { - return choreServicer != null && choreServicer.isChoreScheduled(this); + return choreService != null && choreService.isChoreScheduled(this); } @InterfaceAudience.Private + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") public synchronized void choreForTesting() { chore(); } @@ -354,7 +320,26 @@ protected boolean initialChore() { /** * Override to run cleanup tasks when the Chore encounters an error and must stop running */ - protected synchronized void cleanup() { + protected void cleanup() { + } + + /** + * Call {@link #shutdown(boolean)} with {@code true}. + * @see ScheduledChore#shutdown(boolean) + */ + public synchronized void shutdown() { + shutdown(true); + } + + /** + * Completely shutdown the ScheduleChore, which means we will call cleanup and you should not + * schedule it again. + *
<p/>
    + * This is another path to cleanup the chore, comparing to stop the stopper instance passed in. + */ + public synchronized void shutdown(boolean mayInterruptIfRunning) { + cancel(mayInterruptIfRunning); + cleanup(); } /** diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java index 69a171c205f7..64a076a60633 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java @@ -20,16 +20,18 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.CountingChore; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.DoNothingChore; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.FailInitialChore; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.SampleStopper; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.SleepingChore; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.SlowChore; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Threads; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -38,261 +40,234 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category(MediumTests.class) +@Category({ MiscTests.class, MediumTests.class }) public class TestChoreService { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestChoreService.class); + HBaseClassTestRule.forClass(TestChoreService.class); - public static final Logger log = LoggerFactory.getLogger(TestChoreService.class); + private static final Logger LOG = LoggerFactory.getLogger(TestChoreService.class); + + private static final Configuration CONF = HBaseConfiguration.create(); @Rule public TestName name = new TestName(); + private int initialCorePoolSize = 3; + + private ChoreService service; + + @Before + public void setUp() { + service = new ChoreService(name.getMethodName(), initialCorePoolSize, false); + } + + @After + public void tearDown() { + shutdownService(service); + } + /** - * A few ScheduledChore samples that are useful for testing with ChoreService + * Straight forward stopper implementation that is used by default when one is not provided */ - public static class ScheduledChoreSamples { - /** - * Straight forward stopper implementation that is used by default when one is not provided - */ - public static class SampleStopper implements Stoppable { - private boolean stopped = false; - - @Override - public void stop(String why) { - stopped = true; - } + private static class SampleStopper implements Stoppable { + private boolean stopped = false; - @Override - public boolean isStopped() { - return stopped; - } + @Override + public void stop(String why) { + stopped = true; } - /** - * Sleeps for longer than the scheduled period. 
This chore always misses its scheduled periodic - * executions - */ - public static class SlowChore extends ScheduledChore { - public SlowChore(String name, int period) { - this(name, new SampleStopper(), period); - } - - public SlowChore(String name, Stoppable stopper, int period) { - super(name, stopper, period); - } + @Override + public boolean isStopped() { + return stopped; + } + } - @Override - protected boolean initialChore() { - try { - Thread.sleep(getPeriod() * 2); - } catch (InterruptedException e) { - log.warn("", e); - } - return true; - } + /** + * Sleeps for longer than the scheduled period. This chore always misses its scheduled periodic + * executions + */ + private static class SlowChore extends ScheduledChore { + public SlowChore(String name, int period) { + this(name, new SampleStopper(), period); + } - @Override - protected void chore() { - try { - Thread.sleep(getPeriod() * 2); - } catch (InterruptedException e) { - log.warn("", e); - } - } + public SlowChore(String name, Stoppable stopper, int period) { + super(name, stopper, period); } - /** - * Lightweight ScheduledChore used primarily to fill the scheduling queue in tests - */ - public static class DoNothingChore extends ScheduledChore { - public DoNothingChore(String name, int period) { - super(name, new SampleStopper(), period); - } + @Override + protected boolean initialChore() { + Threads.sleep(getPeriod() * 2); + return true; + } - public DoNothingChore(String name, Stoppable stopper, int period) { - super(name, stopper, period); - } + @Override + protected void chore() { + Threads.sleep(getPeriod() * 2); + } + } - @Override - protected void chore() { - // DO NOTHING - } + /** + * Lightweight ScheduledChore used primarily to fill the scheduling queue in tests + */ + private static class DoNothingChore extends ScheduledChore { + public DoNothingChore(String name, int period) { + super(name, new SampleStopper(), period); } - public static class SleepingChore extends ScheduledChore { - private int sleepTime; + public DoNothingChore(String name, Stoppable stopper, int period) { + super(name, stopper, period); + } - public SleepingChore(String name, int chorePeriod, int sleepTime) { - this(name, new SampleStopper(), chorePeriod, sleepTime); - } + @Override + protected void chore() { + // DO NOTHING + } + } - public SleepingChore(String name, Stoppable stopper, int period, int sleepTime) { - super(name, stopper, period); - this.sleepTime = sleepTime; - } + private static class SleepingChore extends ScheduledChore { + private int sleepTime; - @Override - protected boolean initialChore() { - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - log.warn("", e); - } - return true; - } + public SleepingChore(String name, int chorePeriod, int sleepTime) { + this(name, new SampleStopper(), chorePeriod, sleepTime); + } - @Override - protected void chore() { - try { - Thread.sleep(sleepTime); - } catch (Exception e) { - log.warn("", e); - } - } + public SleepingChore(String name, Stoppable stopper, int period, int sleepTime) { + super(name, stopper, period); + this.sleepTime = sleepTime; } - public static class CountingChore extends ScheduledChore { - private int countOfChoreCalls; - private boolean outputOnTicks = false; + @Override + protected boolean initialChore() { + Threads.sleep(sleepTime); + return true; + } - public CountingChore(String name, int period) { - this(name, new SampleStopper(), period); - } + @Override + protected void chore() { + Threads.sleep(sleepTime); + } + } - public 
CountingChore(String name, Stoppable stopper, int period) { - this(name, stopper, period, false); - } + private static class CountingChore extends ScheduledChore { + private int countOfChoreCalls; + private boolean outputOnTicks = false; - public CountingChore(String name, Stoppable stopper, int period, - final boolean outputOnTicks) { - super(name, stopper, period); - this.countOfChoreCalls = 0; - this.outputOnTicks = outputOnTicks; - } + public CountingChore(String name, int period) { + this(name, new SampleStopper(), period); + } - @Override - protected boolean initialChore() { - countOfChoreCalls++; - if (outputOnTicks) { - outputTickCount(); - } - return true; - } + public CountingChore(String name, Stoppable stopper, int period) { + this(name, stopper, period, false); + } - @Override - protected void chore() { - countOfChoreCalls++; - if (outputOnTicks) { - outputTickCount(); - } - } + public CountingChore(String name, Stoppable stopper, int period, final boolean outputOnTicks) { + super(name, stopper, period); + this.countOfChoreCalls = 0; + this.outputOnTicks = outputOnTicks; + } - private void outputTickCount() { - log.info("Chore: " + getName() + ". Count of chore calls: " + countOfChoreCalls); + @Override + protected boolean initialChore() { + countOfChoreCalls++; + if (outputOnTicks) { + outputTickCount(); } + return true; + } - public int getCountOfChoreCalls() { - return countOfChoreCalls; + @Override + protected void chore() { + countOfChoreCalls++; + if (outputOnTicks) { + outputTickCount(); } + } - public boolean isOutputtingOnTicks() { - return outputOnTicks; - } + private void outputTickCount() { + LOG.info("Chore: " + getName() + ". Count of chore calls: " + countOfChoreCalls); + } - public void setOutputOnTicks(boolean o) { - outputOnTicks = o; - } + public int getCountOfChoreCalls() { + return countOfChoreCalls; } + } + + /** + * A Chore that will try to execute the initial chore a few times before succeeding. Once the + * initial chore is complete the chore cancels itself + */ + public static class FailInitialChore extends ScheduledChore { + private int numberOfFailures; + private int failureThreshold; /** - * A Chore that will try to execute the initial chore a few times before succeeding. Once the - * initial chore is complete the chore cancels itself + * @param failThreshold Number of times the Chore fails when trying to execute initialChore + * before succeeding. */ - public static class FailInitialChore extends ScheduledChore { - private int numberOfFailures; - private int failureThreshold; - - /** - * @param failThreshold Number of times the Chore fails when trying to execute initialChore - * before succeeding. 
- */ - public FailInitialChore(String name, int period, int failThreshold) { - this(name, new SampleStopper(), period, failThreshold); - } - - public FailInitialChore(String name, Stoppable stopper, int period, int failThreshold) { - super(name, stopper, period); - numberOfFailures = 0; - failureThreshold = failThreshold; - } + public FailInitialChore(String name, int period, int failThreshold) { + this(name, new SampleStopper(), period, failThreshold); + } - @Override - protected boolean initialChore() { - if (numberOfFailures < failureThreshold) { - numberOfFailures++; - return false; - } else { - return true; - } - } + public FailInitialChore(String name, Stoppable stopper, int period, int failThreshold) { + super(name, stopper, period); + numberOfFailures = 0; + failureThreshold = failThreshold; + } - @Override - protected void chore() { - assertTrue(numberOfFailures == failureThreshold); - cancel(false); + @Override + protected boolean initialChore() { + if (numberOfFailures < failureThreshold) { + numberOfFailures++; + return false; + } else { + return true; } + } + @Override + protected void chore() { + assertTrue(numberOfFailures == failureThreshold); + cancel(false); } } @Test public void testInitialChorePrecedence() throws InterruptedException { - ChoreService service = new ChoreService("testInitialChorePrecedence"); - final int period = 100; final int failureThreshold = 5; - - try { - ScheduledChore chore = new FailInitialChore("chore", period, failureThreshold); - service.scheduleChore(chore); - - int loopCount = 0; - boolean brokeOutOfLoop = false; - - while (!chore.isInitialChoreComplete() && chore.isScheduled()) { - Thread.sleep(failureThreshold * period); - loopCount++; - if (loopCount > 3) { - brokeOutOfLoop = true; - break; - } + ScheduledChore chore = new FailInitialChore("chore", period, failureThreshold); + service.scheduleChore(chore); + + int loopCount = 0; + boolean brokeOutOfLoop = false; + + while (!chore.isInitialChoreComplete() && chore.isScheduled()) { + Thread.sleep(failureThreshold * period); + loopCount++; + if (loopCount > 3) { + brokeOutOfLoop = true; + break; } - - assertFalse(brokeOutOfLoop); - } finally { - shutdownService(service); } + + assertFalse(brokeOutOfLoop); } @Test public void testCancelChore() throws InterruptedException { final int period = 100; - ScheduledChore chore1 = new DoNothingChore("chore1", period); - ChoreService service = new ChoreService("testCancelChore"); - try { - service.scheduleChore(chore1); - assertTrue(chore1.isScheduled()); + ScheduledChore chore = new DoNothingChore("chore", period); + service.scheduleChore(chore); + assertTrue(chore.isScheduled()); - chore1.cancel(true); - assertFalse(chore1.isScheduled()); - assertTrue(service.getNumberOfScheduledChores() == 0); - } finally { - shutdownService(service); - } + chore.cancel(true); + assertFalse(chore.isScheduled()); + assertTrue(service.getNumberOfScheduledChores() == 0); } @Test @@ -304,12 +279,12 @@ public void testScheduledChoreConstruction() { final TimeUnit UNIT = TimeUnit.NANOSECONDS; ScheduledChore chore1 = - new ScheduledChore(NAME, new SampleStopper(), PERIOD, VALID_DELAY, UNIT) { - @Override - protected void chore() { - // DO NOTHING - } - }; + new ScheduledChore(NAME, new SampleStopper(), PERIOD, VALID_DELAY, UNIT) { + @Override + protected void chore() { + // DO NOTHING + } + }; assertEquals("Name construction failed", NAME, chore1.getName()); assertEquals("Period construction failed", PERIOD, chore1.getPeriod()); @@ -317,12 +292,12 @@ protected void chore() 
{ assertEquals("TimeUnit construction failed", UNIT, chore1.getTimeUnit()); ScheduledChore invalidDelayChore = - new ScheduledChore(NAME, new SampleStopper(), PERIOD, INVALID_DELAY, UNIT) { - @Override - protected void chore() { - // DO NOTHING - } - }; + new ScheduledChore(NAME, new SampleStopper(), PERIOD, INVALID_DELAY, UNIT) { + @Override + protected void chore() { + // DO NOTHING + } + }; assertEquals("Initial Delay should be set to 0 when invalid", 0, invalidDelayChore.getInitialDelay()); @@ -334,7 +309,7 @@ public void testChoreServiceConstruction() throws InterruptedException { final int defaultCorePoolSize = ChoreService.MIN_CORE_POOL_SIZE; ChoreService customInit = - new ChoreService("testChoreServiceConstruction_custom", corePoolSize, false); + new ChoreService("testChoreServiceConstruction_custom", corePoolSize, false); try { assertEquals(corePoolSize, customInit.getCorePoolSize()); } finally { @@ -360,258 +335,218 @@ public void testChoreServiceConstruction() throws InterruptedException { public void testFrequencyOfChores() throws InterruptedException { final int period = 100; // Small delta that acts as time buffer (allowing chores to complete if running slowly) - final int delta = period/5; - ChoreService service = new ChoreService("testFrequencyOfChores"); + final int delta = period / 5; CountingChore chore = new CountingChore("countingChore", period); - try { - service.scheduleChore(chore); + service.scheduleChore(chore); - Thread.sleep(10 * period + delta); - assertEquals("10 periods have elapsed.", 11, chore.getCountOfChoreCalls()); + Thread.sleep(10 * period + delta); + assertEquals("10 periods have elapsed.", 11, chore.getCountOfChoreCalls()); - Thread.sleep(10 * period + delta); - assertEquals("20 periods have elapsed.", 21, chore.getCountOfChoreCalls()); - } finally { - shutdownService(service); - } + Thread.sleep(10 * period + delta); + assertEquals("20 periods have elapsed.", 21, chore.getCountOfChoreCalls()); } - public void shutdownService(ChoreService service) throws InterruptedException { + public void shutdownService(ChoreService service) { service.shutdown(); - while (!service.isTerminated()) { - Thread.sleep(100); - } + Waiter.waitFor(CONF, 1000, () -> service.isTerminated()); } @Test public void testForceTrigger() throws InterruptedException { final int period = 100; - final int delta = period/10; - ChoreService service = new ChoreService("testForceTrigger"); + final int delta = period / 10; final CountingChore chore = new CountingChore("countingChore", period); - try { - service.scheduleChore(chore); - Thread.sleep(10 * period + delta); - - assertEquals("10 periods have elapsed.", 11, chore.getCountOfChoreCalls()); - - // Force five runs of the chore to occur, sleeping between triggers to ensure the - // chore has time to run - chore.triggerNow(); - Thread.sleep(delta); - chore.triggerNow(); - Thread.sleep(delta); - chore.triggerNow(); - Thread.sleep(delta); - chore.triggerNow(); - Thread.sleep(delta); - chore.triggerNow(); - Thread.sleep(delta); - - assertEquals("Trigger was called 5 times after 10 periods.", 16, - chore.getCountOfChoreCalls()); - - Thread.sleep(10 * period + delta); - - // Be loosey-goosey. It used to be '26' but it was a big flakey relying on timing. 
- assertTrue("Expected at least 16 invocations, instead got " + chore.getCountOfChoreCalls(), - chore.getCountOfChoreCalls() > 16); - } finally { - shutdownService(service); - } + service.scheduleChore(chore); + Thread.sleep(10 * period + delta); + + assertEquals("10 periods have elapsed.", 11, chore.getCountOfChoreCalls()); + + // Force five runs of the chore to occur, sleeping between triggers to ensure the + // chore has time to run + chore.triggerNow(); + Thread.sleep(delta); + chore.triggerNow(); + Thread.sleep(delta); + chore.triggerNow(); + Thread.sleep(delta); + chore.triggerNow(); + Thread.sleep(delta); + chore.triggerNow(); + Thread.sleep(delta); + + assertEquals("Trigger was called 5 times after 10 periods.", 16, chore.getCountOfChoreCalls()); + + Thread.sleep(10 * period + delta); + + // Be loosey-goosey. It used to be '26' but it was a big flakey relying on timing. + assertTrue("Expected at least 16 invocations, instead got " + chore.getCountOfChoreCalls(), + chore.getCountOfChoreCalls() > 16); } @Test public void testCorePoolIncrease() throws InterruptedException { - final int initialCorePoolSize = 3; - ChoreService service = new ChoreService("testCorePoolIncrease", initialCorePoolSize, false); + assertEquals("Setting core pool size gave unexpected results.", initialCorePoolSize, + service.getCorePoolSize()); - try { - assertEquals("Setting core pool size gave unexpected results.", initialCorePoolSize, - service.getCorePoolSize()); - - final int slowChorePeriod = 100; - SlowChore slowChore1 = new SlowChore("slowChore1", slowChorePeriod); - SlowChore slowChore2 = new SlowChore("slowChore2", slowChorePeriod); - SlowChore slowChore3 = new SlowChore("slowChore3", slowChorePeriod); + final int slowChorePeriod = 100; + SlowChore slowChore1 = new SlowChore("slowChore1", slowChorePeriod); + SlowChore slowChore2 = new SlowChore("slowChore2", slowChorePeriod); + SlowChore slowChore3 = new SlowChore("slowChore3", slowChorePeriod); - service.scheduleChore(slowChore1); - service.scheduleChore(slowChore2); - service.scheduleChore(slowChore3); + service.scheduleChore(slowChore1); + service.scheduleChore(slowChore2); + service.scheduleChore(slowChore3); - Thread.sleep(slowChorePeriod * 10); - assertEquals("Should not create more pools than scheduled chores", 3, - service.getCorePoolSize()); + Thread.sleep(slowChorePeriod * 10); + assertEquals("Should not create more pools than scheduled chores", 3, + service.getCorePoolSize()); - SlowChore slowChore4 = new SlowChore("slowChore4", slowChorePeriod); - service.scheduleChore(slowChore4); + SlowChore slowChore4 = new SlowChore("slowChore4", slowChorePeriod); + service.scheduleChore(slowChore4); - Thread.sleep(slowChorePeriod * 10); - assertEquals("Chores are missing their start time. Should expand core pool size", 4, - service.getCorePoolSize()); + Thread.sleep(slowChorePeriod * 10); + assertEquals("Chores are missing their start time. Should expand core pool size", 4, + service.getCorePoolSize()); - SlowChore slowChore5 = new SlowChore("slowChore5", slowChorePeriod); - service.scheduleChore(slowChore5); + SlowChore slowChore5 = new SlowChore("slowChore5", slowChorePeriod); + service.scheduleChore(slowChore5); - Thread.sleep(slowChorePeriod * 10); - assertEquals("Chores are missing their start time. Should expand core pool size", 5, - service.getCorePoolSize()); - } finally { - shutdownService(service); - } + Thread.sleep(slowChorePeriod * 10); + assertEquals("Chores are missing their start time. 
Should expand core pool size", 5, + service.getCorePoolSize()); } @Test public void testCorePoolDecrease() throws InterruptedException { - final int initialCorePoolSize = 3; - ChoreService service = new ChoreService("testCorePoolDecrease", initialCorePoolSize, false); final int chorePeriod = 100; - try { - // Slow chores always miss their start time and thus the core pool size should be at least as - // large as the number of running slow chores - SlowChore slowChore1 = new SlowChore("slowChore1", chorePeriod); - SlowChore slowChore2 = new SlowChore("slowChore2", chorePeriod); - SlowChore slowChore3 = new SlowChore("slowChore3", chorePeriod); - - service.scheduleChore(slowChore1); - service.scheduleChore(slowChore2); - service.scheduleChore(slowChore3); - - Thread.sleep(chorePeriod * 10); - assertEquals("Should not create more pools than scheduled chores", - service.getNumberOfScheduledChores(), service.getCorePoolSize()); - - SlowChore slowChore4 = new SlowChore("slowChore4", chorePeriod); - service.scheduleChore(slowChore4); - Thread.sleep(chorePeriod * 10); - assertEquals("Chores are missing their start time. Should expand core pool size", - service.getNumberOfScheduledChores(), service.getCorePoolSize()); - - SlowChore slowChore5 = new SlowChore("slowChore5", chorePeriod); - service.scheduleChore(slowChore5); - Thread.sleep(chorePeriod * 10); - assertEquals("Chores are missing their start time. Should expand core pool size", - service.getNumberOfScheduledChores(), service.getCorePoolSize()); - assertEquals(5, service.getNumberOfChoresMissingStartTime()); - - // Now we begin to cancel the chores that caused an increase in the core thread pool of the - // ChoreService. These cancellations should cause a decrease in the core thread pool. - slowChore5.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(4, service.getNumberOfChoresMissingStartTime()); - - slowChore4.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(3, service.getNumberOfChoresMissingStartTime()); - - slowChore3.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(2, service.getNumberOfChoresMissingStartTime()); - - slowChore2.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(1, service.getNumberOfChoresMissingStartTime()); - - slowChore1.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(0, service.getNumberOfChoresMissingStartTime()); - } finally { - shutdownService(service); - } + // Slow chores always miss their start time and thus the core pool size should be at least as + // large as the number of running slow chores + SlowChore slowChore1 = new SlowChore("slowChore1", chorePeriod); + SlowChore slowChore2 = new SlowChore("slowChore2", chorePeriod); + SlowChore slowChore3 = new SlowChore("slowChore3", chorePeriod); + + service.scheduleChore(slowChore1); + service.scheduleChore(slowChore2); + service.scheduleChore(slowChore3); + + Thread.sleep(chorePeriod * 10); + 
assertEquals("Should not create more pools than scheduled chores", + service.getNumberOfScheduledChores(), service.getCorePoolSize()); + + SlowChore slowChore4 = new SlowChore("slowChore4", chorePeriod); + service.scheduleChore(slowChore4); + Thread.sleep(chorePeriod * 10); + assertEquals("Chores are missing their start time. Should expand core pool size", + service.getNumberOfScheduledChores(), service.getCorePoolSize()); + + SlowChore slowChore5 = new SlowChore("slowChore5", chorePeriod); + service.scheduleChore(slowChore5); + Thread.sleep(chorePeriod * 10); + assertEquals("Chores are missing their start time. Should expand core pool size", + service.getNumberOfScheduledChores(), service.getCorePoolSize()); + assertEquals(5, service.getNumberOfChoresMissingStartTime()); + + // Now we begin to cancel the chores that caused an increase in the core thread pool of the + // ChoreService. These cancellations should cause a decrease in the core thread pool. + slowChore5.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(4, service.getNumberOfChoresMissingStartTime()); + + slowChore4.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(3, service.getNumberOfChoresMissingStartTime()); + + slowChore3.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(2, service.getNumberOfChoresMissingStartTime()); + + slowChore2.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(1, service.getNumberOfChoresMissingStartTime()); + + slowChore1.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(0, service.getNumberOfChoresMissingStartTime()); } @Test public void testNumberOfRunningChores() throws InterruptedException { - ChoreService service = new ChoreService("testNumberOfRunningChores"); - final int period = 100; final int sleepTime = 5; - - try { - DoNothingChore dn1 = new DoNothingChore("dn1", period); - DoNothingChore dn2 = new DoNothingChore("dn2", period); - DoNothingChore dn3 = new DoNothingChore("dn3", period); - DoNothingChore dn4 = new DoNothingChore("dn4", period); - DoNothingChore dn5 = new DoNothingChore("dn5", period); - - service.scheduleChore(dn1); - service.scheduleChore(dn2); - service.scheduleChore(dn3); - service.scheduleChore(dn4); - service.scheduleChore(dn5); - - Thread.sleep(sleepTime); - assertEquals("Scheduled chore mismatch", 5, service.getNumberOfScheduledChores()); - - dn1.cancel(); - Thread.sleep(sleepTime); - assertEquals("Scheduled chore mismatch", 4, service.getNumberOfScheduledChores()); - - dn2.cancel(); - dn3.cancel(); - dn4.cancel(); - Thread.sleep(sleepTime); - assertEquals("Scheduled chore mismatch", 1, service.getNumberOfScheduledChores()); - - dn5.cancel(); - Thread.sleep(sleepTime); - assertEquals("Scheduled chore mismatch", 0, service.getNumberOfScheduledChores()); - } finally { - shutdownService(service); - } + DoNothingChore dn1 = new DoNothingChore("dn1", period); + DoNothingChore dn2 = new DoNothingChore("dn2", 
period); + DoNothingChore dn3 = new DoNothingChore("dn3", period); + DoNothingChore dn4 = new DoNothingChore("dn4", period); + DoNothingChore dn5 = new DoNothingChore("dn5", period); + + service.scheduleChore(dn1); + service.scheduleChore(dn2); + service.scheduleChore(dn3); + service.scheduleChore(dn4); + service.scheduleChore(dn5); + + Thread.sleep(sleepTime); + assertEquals("Scheduled chore mismatch", 5, service.getNumberOfScheduledChores()); + + dn1.cancel(); + Thread.sleep(sleepTime); + assertEquals("Scheduled chore mismatch", 4, service.getNumberOfScheduledChores()); + + dn2.cancel(); + dn3.cancel(); + dn4.cancel(); + Thread.sleep(sleepTime); + assertEquals("Scheduled chore mismatch", 1, service.getNumberOfScheduledChores()); + + dn5.cancel(); + Thread.sleep(sleepTime); + assertEquals("Scheduled chore mismatch", 0, service.getNumberOfScheduledChores()); } @Test public void testNumberOfChoresMissingStartTime() throws InterruptedException { - ChoreService service = new ChoreService("testNumberOfChoresMissingStartTime"); - final int period = 100; final int sleepTime = 20 * period; - - try { - // Slow chores sleep for a length of time LONGER than their period. Thus, SlowChores - // ALWAYS miss their start time since their execution takes longer than their period - SlowChore sc1 = new SlowChore("sc1", period); - SlowChore sc2 = new SlowChore("sc2", period); - SlowChore sc3 = new SlowChore("sc3", period); - SlowChore sc4 = new SlowChore("sc4", period); - SlowChore sc5 = new SlowChore("sc5", period); - - service.scheduleChore(sc1); - service.scheduleChore(sc2); - service.scheduleChore(sc3); - service.scheduleChore(sc4); - service.scheduleChore(sc5); - - Thread.sleep(sleepTime); - assertEquals(5, service.getNumberOfChoresMissingStartTime()); - - sc1.cancel(); - Thread.sleep(sleepTime); - assertEquals(4, service.getNumberOfChoresMissingStartTime()); - - sc2.cancel(); - sc3.cancel(); - sc4.cancel(); - Thread.sleep(sleepTime); - assertEquals(1, service.getNumberOfChoresMissingStartTime()); - - sc5.cancel(); - Thread.sleep(sleepTime); - assertEquals(0, service.getNumberOfChoresMissingStartTime()); - } finally { - shutdownService(service); - } + // Slow chores sleep for a length of time LONGER than their period. 
Thus, SlowChores + // ALWAYS miss their start time since their execution takes longer than their period + SlowChore sc1 = new SlowChore("sc1", period); + SlowChore sc2 = new SlowChore("sc2", period); + SlowChore sc3 = new SlowChore("sc3", period); + SlowChore sc4 = new SlowChore("sc4", period); + SlowChore sc5 = new SlowChore("sc5", period); + + service.scheduleChore(sc1); + service.scheduleChore(sc2); + service.scheduleChore(sc3); + service.scheduleChore(sc4); + service.scheduleChore(sc5); + + Thread.sleep(sleepTime); + assertEquals(5, service.getNumberOfChoresMissingStartTime()); + + sc1.cancel(); + Thread.sleep(sleepTime); + assertEquals(4, service.getNumberOfChoresMissingStartTime()); + + sc2.cancel(); + sc3.cancel(); + sc4.cancel(); + Thread.sleep(sleepTime); + assertEquals(1, service.getNumberOfChoresMissingStartTime()); + + sc5.cancel(); + Thread.sleep(sleepTime); + assertEquals(0, service.getNumberOfChoresMissingStartTime()); } /** @@ -621,163 +556,145 @@ public void testNumberOfChoresMissingStartTime() throws InterruptedException { */ @Test public void testMaximumChoreServiceThreads() throws InterruptedException { - ChoreService service = new ChoreService("testMaximumChoreServiceThreads"); final int period = 100; final int sleepTime = 5 * period; - - try { - // Slow chores sleep for a length of time LONGER than their period. Thus, SlowChores - // ALWAYS miss their start time since their execution takes longer than their period. - // Chores that miss their start time will trigger the onChoreMissedStartTime callback - // in the ChoreService. This callback will try to increase the number of core pool - // threads. - SlowChore sc1 = new SlowChore("sc1", period); - SlowChore sc2 = new SlowChore("sc2", period); - SlowChore sc3 = new SlowChore("sc3", period); - SlowChore sc4 = new SlowChore("sc4", period); - SlowChore sc5 = new SlowChore("sc5", period); - - service.scheduleChore(sc1); - service.scheduleChore(sc2); - service.scheduleChore(sc3); - service.scheduleChore(sc4); - service.scheduleChore(sc5); - - Thread.sleep(sleepTime); - assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); - - SlowChore sc6 = new SlowChore("sc6", period); - SlowChore sc7 = new SlowChore("sc7", period); - SlowChore sc8 = new SlowChore("sc8", period); - SlowChore sc9 = new SlowChore("sc9", period); - SlowChore sc10 = new SlowChore("sc10", period); - - service.scheduleChore(sc6); - service.scheduleChore(sc7); - service.scheduleChore(sc8); - service.scheduleChore(sc9); - service.scheduleChore(sc10); - - Thread.sleep(sleepTime); - assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); - } finally { - shutdownService(service); - } + // Slow chores sleep for a length of time LONGER than their period. Thus, SlowChores + // ALWAYS miss their start time since their execution takes longer than their period. + // Chores that miss their start time will trigger the onChoreMissedStartTime callback + // in the ChoreService. This callback will try to increase the number of core pool + // threads. 
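+    // In other words: each SlowChore sleeps for 2 * period per run, so every execution overruns
+    // its slot and fires onChoreMissedStartTime; each such chore can grow the core pool by at
+    // most one thread, so the pool never grows beyond the number of scheduled chores.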
+ SlowChore sc1 = new SlowChore("sc1", period); + SlowChore sc2 = new SlowChore("sc2", period); + SlowChore sc3 = new SlowChore("sc3", period); + SlowChore sc4 = new SlowChore("sc4", period); + SlowChore sc5 = new SlowChore("sc5", period); + + service.scheduleChore(sc1); + service.scheduleChore(sc2); + service.scheduleChore(sc3); + service.scheduleChore(sc4); + service.scheduleChore(sc5); + + Thread.sleep(sleepTime); + assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); + + SlowChore sc6 = new SlowChore("sc6", period); + SlowChore sc7 = new SlowChore("sc7", period); + SlowChore sc8 = new SlowChore("sc8", period); + SlowChore sc9 = new SlowChore("sc9", period); + SlowChore sc10 = new SlowChore("sc10", period); + + service.scheduleChore(sc6); + service.scheduleChore(sc7); + service.scheduleChore(sc8); + service.scheduleChore(sc9); + service.scheduleChore(sc10); + + Thread.sleep(sleepTime); + assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); } @Test public void testChangingChoreServices() throws InterruptedException { final int period = 100; final int sleepTime = 10; - ChoreService service1 = new ChoreService("testChangingChoreServices_1"); - ChoreService service2 = new ChoreService("testChangingChoreServices_2"); + ChoreService anotherService = new ChoreService(name.getMethodName() + "_2"); ScheduledChore chore = new DoNothingChore("sample", period); try { assertFalse(chore.isScheduled()); - assertFalse(service1.isChoreScheduled(chore)); - assertFalse(service2.isChoreScheduled(chore)); - assertTrue(chore.getChoreServicer() == null); + assertFalse(service.isChoreScheduled(chore)); + assertFalse(anotherService.isChoreScheduled(chore)); + assertTrue(chore.getChoreService() == null); - service1.scheduleChore(chore); + service.scheduleChore(chore); Thread.sleep(sleepTime); assertTrue(chore.isScheduled()); - assertTrue(service1.isChoreScheduled(chore)); - assertFalse(service2.isChoreScheduled(chore)); - assertFalse(chore.getChoreServicer() == null); + assertTrue(service.isChoreScheduled(chore)); + assertFalse(anotherService.isChoreScheduled(chore)); + assertFalse(chore.getChoreService() == null); - service2.scheduleChore(chore); + anotherService.scheduleChore(chore); Thread.sleep(sleepTime); assertTrue(chore.isScheduled()); - assertFalse(service1.isChoreScheduled(chore)); - assertTrue(service2.isChoreScheduled(chore)); - assertFalse(chore.getChoreServicer() == null); + assertFalse(service.isChoreScheduled(chore)); + assertTrue(anotherService.isChoreScheduled(chore)); + assertFalse(chore.getChoreService() == null); chore.cancel(); assertFalse(chore.isScheduled()); - assertFalse(service1.isChoreScheduled(chore)); - assertFalse(service2.isChoreScheduled(chore)); - assertTrue(chore.getChoreServicer() == null); + assertFalse(service.isChoreScheduled(chore)); + assertFalse(anotherService.isChoreScheduled(chore)); + assertTrue(chore.getChoreService() == null); } finally { - shutdownService(service1); - shutdownService(service2); + shutdownService(anotherService); } } @Test public void testStopperForScheduledChores() throws InterruptedException { - ChoreService service = new ChoreService("testStopperForScheduledChores"); Stoppable stopperForGroup1 = new SampleStopper(); Stoppable stopperForGroup2 = new SampleStopper(); final int period = 100; - final int delta = period/10; - - try { - ScheduledChore chore1_group1 = new DoNothingChore("c1g1", stopperForGroup1, period); - ScheduledChore chore2_group1 = new DoNothingChore("c2g1", stopperForGroup1, 
period); - ScheduledChore chore3_group1 = new DoNothingChore("c3g1", stopperForGroup1, period); - - ScheduledChore chore1_group2 = new DoNothingChore("c1g2", stopperForGroup2, period); - ScheduledChore chore2_group2 = new DoNothingChore("c2g2", stopperForGroup2, period); - ScheduledChore chore3_group2 = new DoNothingChore("c3g2", stopperForGroup2, period); - - service.scheduleChore(chore1_group1); - service.scheduleChore(chore2_group1); - service.scheduleChore(chore3_group1); - service.scheduleChore(chore1_group2); - service.scheduleChore(chore2_group2); - service.scheduleChore(chore3_group2); - - Thread.sleep(delta); - Thread.sleep(10 * period); - assertTrue(chore1_group1.isScheduled()); - assertTrue(chore2_group1.isScheduled()); - assertTrue(chore3_group1.isScheduled()); - assertTrue(chore1_group2.isScheduled()); - assertTrue(chore2_group2.isScheduled()); - assertTrue(chore3_group2.isScheduled()); - - stopperForGroup1.stop("test stopping group 1"); - Thread.sleep(period); - assertFalse(chore1_group1.isScheduled()); - assertFalse(chore2_group1.isScheduled()); - assertFalse(chore3_group1.isScheduled()); - assertTrue(chore1_group2.isScheduled()); - assertTrue(chore2_group2.isScheduled()); - assertTrue(chore3_group2.isScheduled()); - - stopperForGroup2.stop("test stopping group 2"); - Thread.sleep(period); - assertFalse(chore1_group1.isScheduled()); - assertFalse(chore2_group1.isScheduled()); - assertFalse(chore3_group1.isScheduled()); - assertFalse(chore1_group2.isScheduled()); - assertFalse(chore2_group2.isScheduled()); - assertFalse(chore3_group2.isScheduled()); - } finally { - shutdownService(service); - } + final int delta = period / 10; + ScheduledChore chore1_group1 = new DoNothingChore("c1g1", stopperForGroup1, period); + ScheduledChore chore2_group1 = new DoNothingChore("c2g1", stopperForGroup1, period); + ScheduledChore chore3_group1 = new DoNothingChore("c3g1", stopperForGroup1, period); + + ScheduledChore chore1_group2 = new DoNothingChore("c1g2", stopperForGroup2, period); + ScheduledChore chore2_group2 = new DoNothingChore("c2g2", stopperForGroup2, period); + ScheduledChore chore3_group2 = new DoNothingChore("c3g2", stopperForGroup2, period); + + service.scheduleChore(chore1_group1); + service.scheduleChore(chore2_group1); + service.scheduleChore(chore3_group1); + service.scheduleChore(chore1_group2); + service.scheduleChore(chore2_group2); + service.scheduleChore(chore3_group2); + + Thread.sleep(delta); + Thread.sleep(10 * period); + assertTrue(chore1_group1.isScheduled()); + assertTrue(chore2_group1.isScheduled()); + assertTrue(chore3_group1.isScheduled()); + assertTrue(chore1_group2.isScheduled()); + assertTrue(chore2_group2.isScheduled()); + assertTrue(chore3_group2.isScheduled()); + + stopperForGroup1.stop("test stopping group 1"); + Thread.sleep(period); + assertFalse(chore1_group1.isScheduled()); + assertFalse(chore2_group1.isScheduled()); + assertFalse(chore3_group1.isScheduled()); + assertTrue(chore1_group2.isScheduled()); + assertTrue(chore2_group2.isScheduled()); + assertTrue(chore3_group2.isScheduled()); + + stopperForGroup2.stop("test stopping group 2"); + Thread.sleep(period); + assertFalse(chore1_group1.isScheduled()); + assertFalse(chore2_group1.isScheduled()); + assertFalse(chore3_group1.isScheduled()); + assertFalse(chore1_group2.isScheduled()); + assertFalse(chore2_group2.isScheduled()); + assertFalse(chore3_group2.isScheduled()); } @Test public void testShutdownCancelsScheduledChores() throws InterruptedException { final int period = 100; - ChoreService 
service = new ChoreService("testShutdownCancelsScheduledChores"); ScheduledChore successChore1 = new DoNothingChore("sc1", period); ScheduledChore successChore2 = new DoNothingChore("sc2", period); ScheduledChore successChore3 = new DoNothingChore("sc3", period); + assertTrue(service.scheduleChore(successChore1)); + assertTrue(successChore1.isScheduled()); + assertTrue(service.scheduleChore(successChore2)); + assertTrue(successChore2.isScheduled()); + assertTrue(service.scheduleChore(successChore3)); + assertTrue(successChore3.isScheduled()); - try { - assertTrue(service.scheduleChore(successChore1)); - assertTrue(successChore1.isScheduled()); - assertTrue(service.scheduleChore(successChore2)); - assertTrue(successChore2.isScheduled()); - assertTrue(service.scheduleChore(successChore3)); - assertTrue(successChore3.isScheduled()); - } finally { - shutdownService(service); - } + shutdownService(service); assertFalse(successChore1.isScheduled()); assertFalse(successChore2.isScheduled()); @@ -788,34 +705,28 @@ public void testShutdownCancelsScheduledChores() throws InterruptedException { public void testShutdownWorksWhileChoresAreExecuting() throws InterruptedException { final int period = 100; final int sleep = 5 * period; - ChoreService service = new ChoreService("testShutdownWorksWhileChoresAreExecuting"); ScheduledChore slowChore1 = new SleepingChore("sc1", period, sleep); ScheduledChore slowChore2 = new SleepingChore("sc2", period, sleep); ScheduledChore slowChore3 = new SleepingChore("sc3", period, sleep); - try { - assertTrue(service.scheduleChore(slowChore1)); - assertTrue(service.scheduleChore(slowChore2)); - assertTrue(service.scheduleChore(slowChore3)); + assertTrue(service.scheduleChore(slowChore1)); + assertTrue(service.scheduleChore(slowChore2)); + assertTrue(service.scheduleChore(slowChore3)); - Thread.sleep(sleep / 2); - shutdownService(service); + Thread.sleep(sleep / 2); + shutdownService(service); - assertFalse(slowChore1.isScheduled()); - assertFalse(slowChore2.isScheduled()); - assertFalse(slowChore3.isScheduled()); - assertTrue(service.isShutdown()); + assertFalse(slowChore1.isScheduled()); + assertFalse(slowChore2.isScheduled()); + assertFalse(slowChore3.isScheduled()); + assertTrue(service.isShutdown()); - Thread.sleep(5); - assertTrue(service.isTerminated()); - } finally { - shutdownService(service); - } + Thread.sleep(5); + assertTrue(service.isTerminated()); } @Test public void testShutdownRejectsNewSchedules() throws InterruptedException { final int period = 100; - ChoreService service = new ChoreService("testShutdownRejectsNewSchedules"); ScheduledChore successChore1 = new DoNothingChore("sc1", period); ScheduledChore successChore2 = new DoNothingChore("sc2", period); ScheduledChore successChore3 = new DoNothingChore("sc3", period); @@ -823,16 +734,14 @@ public void testShutdownRejectsNewSchedules() throws InterruptedException { ScheduledChore failChore2 = new DoNothingChore("fc2", period); ScheduledChore failChore3 = new DoNothingChore("fc3", period); - try { - assertTrue(service.scheduleChore(successChore1)); - assertTrue(successChore1.isScheduled()); - assertTrue(service.scheduleChore(successChore2)); - assertTrue(successChore2.isScheduled()); - assertTrue(service.scheduleChore(successChore3)); - assertTrue(successChore3.isScheduled()); - } finally { - shutdownService(service); - } + assertTrue(service.scheduleChore(successChore1)); + assertTrue(successChore1.isScheduled()); + assertTrue(service.scheduleChore(successChore2)); + 
assertTrue(successChore2.isScheduled()); + assertTrue(service.scheduleChore(successChore3)); + assertTrue(successChore3.isScheduled()); + + shutdownService(service); assertFalse(service.scheduleChore(failChore1)); assertFalse(failChore1.isScheduled()); @@ -845,17 +754,38 @@ public void testShutdownRejectsNewSchedules() throws InterruptedException { /** * for HBASE-25014 */ - @Test(timeout = 10000) + @Test public void testInitialDelay() { - ChoreService service = new ChoreService(name.getMethodName()); SampleStopper stopper = new SampleStopper(); service.scheduleChore(new ScheduledChore("chore", stopper, 1000, 2000) { - @Override protected void chore() { + @Override + protected void chore() { stopper.stop("test"); } }); - while (!stopper.isStopped()) { - Threads.sleep(1000); - } + Waiter.waitFor(CONF, 5000, () -> stopper.isStopped()); + } + + @Test + public void testCleanupWithStopper() { + SampleStopper stopper = new SampleStopper(); + DoNothingChore chore = spy(new DoNothingChore("chore", stopper, 10)); + service.scheduleChore(chore); + assertTrue(chore.isScheduled()); + verify(chore, never()).cleanup(); + stopper.stop("test"); + Waiter.waitFor(CONF, 200, () -> !chore.isScheduled()); + verify(chore, atLeastOnce()).cleanup(); + } + + @Test + public void testCleanupWithShutdown() { + DoNothingChore chore = spy(new DoNothingChore("chore", 10)); + service.scheduleChore(chore); + assertTrue(chore.isScheduled()); + verify(chore, never()).cleanup(); + chore.shutdown(true); + Waiter.waitFor(CONF, 200, () -> !chore.isScheduled()); + verify(chore, atLeastOnce()).cleanup(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index cbe001e91588..94f3bf2bfda7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -55,7 +55,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; @@ -1500,11 +1499,9 @@ private void switchSnapshotCleanup(final boolean on) { try { snapshotCleanupTracker.setSnapshotCleanupEnabled(on); if (on) { - if (!getChoreService().isChoreScheduled(this.snapshotCleanerChore)) { - getChoreService().scheduleChore(this.snapshotCleanerChore); - } + getChoreService().scheduleChore(this.snapshotCleanerChore); } else { - getChoreService().cancelChore(this.snapshotCleanerChore); + this.snapshotCleanerChore.cancel(); } } catch (KeeperException e) { LOG.error("Error updating snapshot cleanup mode to {}", on, e); @@ -1528,24 +1525,23 @@ private void stopProcedureExecutor() { } private void stopChores() { - ChoreService choreService = getChoreService(); - if (choreService != null) { - choreService.cancelChore(this.mobFileCleanerChore); - choreService.cancelChore(this.mobFileCompactionChore); - choreService.cancelChore(this.balancerChore); + if (getChoreService() != null) { + shutdownChore(mobFileCleanerChore); + shutdownChore(mobFileCompactionChore); + shutdownChore(balancerChore); if (regionNormalizerManager != null) { - choreService.cancelChore(regionNormalizerManager.getRegionNormalizerChore()); - } - choreService.cancelChore(this.clusterStatusChore); - choreService.cancelChore(this.catalogJanitorChore); - 
choreService.cancelChore(this.clusterStatusPublisherChore);
-      choreService.cancelChore(this.snapshotQuotaChore);
-      choreService.cancelChore(this.logCleaner);
-      choreService.cancelChore(this.hfileCleaner);
-      choreService.cancelChore(this.replicationBarrierCleaner);
-      choreService.cancelChore(this.snapshotCleanerChore);
-      choreService.cancelChore(this.hbckChore);
-      choreService.cancelChore(this.regionsRecoveryChore);
+        shutdownChore(regionNormalizerManager.getRegionNormalizerChore());
+      }
+      shutdownChore(clusterStatusChore);
+      shutdownChore(catalogJanitorChore);
+      shutdownChore(clusterStatusPublisherChore);
+      shutdownChore(snapshotQuotaChore);
+      shutdownChore(logCleaner);
+      shutdownChore(hfileCleaner);
+      shutdownChore(replicationBarrierCleaner);
+      shutdownChore(snapshotCleanerChore);
+      shutdownChore(hbckChore);
+      shutdownChore(regionsRecoveryChore);
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java
index a756715062ec..5597cca1152b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java
@@ -23,7 +23,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HConstants;
@@ -70,7 +69,6 @@ public class RegionsRecoveryChore extends ScheduledChore {
    */
   RegionsRecoveryChore(final Stoppable stopper, final Configuration configuration,
       final HMaster hMaster) {
-
     super(REGIONS_RECOVERY_CHORE_NAME, stopper, configuration.getInt(
       HConstants.REGIONS_RECOVERY_INTERVAL, HConstants.DEFAULT_REGIONS_RECOVERY_INTERVAL));
     this.hMaster = hMaster;
@@ -125,7 +123,6 @@ protected void chore() {
 
   private Map<TableName, List<byte[]>> getTableToRegionsByRefCount(
       final Map<ServerName, ServerMetrics> serverMetricsMap) {
-
     final Map<TableName, List<byte[]>> tableToReopenRegionsMap = new HashMap<>();
     for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
       Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
@@ -146,13 +143,11 @@ private Map<TableName, List<byte[]>> getTableToRegionsByRefCount(
       }
     }
     return tableToReopenRegionsMap;
-
   }
 
   private void prepareTableToReopenRegionsMap(
       final Map<TableName, List<byte[]>> tableToReopenRegionsMap,
       final byte[] regionName, final int regionStoreRefCount) {
-
     final RegionInfo regionInfo = hMaster.getAssignmentManager().getRegionInfo(regionName);
     final TableName tableName = regionInfo.getTable();
     if (TableName.isMetaTableName(tableName)) {
@@ -165,21 +160,4 @@ private void prepareTableToReopenRegionsMap(
     tableToReopenRegionsMap
       .computeIfAbsent(tableName, (key) -> new ArrayList<>()).add(regionName);
   }
-
-  // hashcode/equals implementation to ensure at-most one object of RegionsRecoveryChore
-  // is scheduled at a time - RegionsRecoveryConfigManager
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    return o != null && getClass() == o.getClass();
-  }
-
-  @Override
-  public int hashCode() {
-    return 31;
-  }
-
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java
index b1bfdc0ecb04..78777a18cfd4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.master; +import com.google.errorprone.annotations.RestrictedApi; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HConstants; @@ -27,8 +28,7 @@ import org.slf4j.LoggerFactory; /** - * Config manager for RegionsRecovery Chore - Dynamically reload config and update chore - * accordingly + * Config manager for RegionsRecovery Chore - Dynamically reload config and update chore accordingly */ @InterfaceAudience.Private public class RegionsRecoveryConfigManager implements ConfigurationObserver { @@ -36,6 +36,7 @@ public class RegionsRecoveryConfigManager implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(RegionsRecoveryConfigManager.class); private final HMaster hMaster; + private RegionsRecoveryChore chore; private int prevMaxStoreFileRefCount; private int prevRegionsRecoveryInterval; @@ -51,34 +52,35 @@ public void onConfigurationChange(Configuration conf) { final int newMaxStoreFileRefCount = getMaxStoreFileRefCount(conf); final int newRegionsRecoveryInterval = getRegionsRecoveryChoreInterval(conf); - if (prevMaxStoreFileRefCount == newMaxStoreFileRefCount - && prevRegionsRecoveryInterval == newRegionsRecoveryInterval) { + if (prevMaxStoreFileRefCount == newMaxStoreFileRefCount && + prevRegionsRecoveryInterval == newRegionsRecoveryInterval) { // no need to re-schedule the chore with updated config // as there is no change in desired configs return; } - LOG.info("Config Reload for RegionsRecovery Chore. prevMaxStoreFileRefCount: {}," + + LOG.info( + "Config Reload for RegionsRecovery Chore. prevMaxStoreFileRefCount: {}," + " newMaxStoreFileRefCount: {}, prevRegionsRecoveryInterval: {}, " + - "newRegionsRecoveryInterval: {}", prevMaxStoreFileRefCount, newMaxStoreFileRefCount, - prevRegionsRecoveryInterval, newRegionsRecoveryInterval); + "newRegionsRecoveryInterval: {}", + prevMaxStoreFileRefCount, newMaxStoreFileRefCount, prevRegionsRecoveryInterval, + newRegionsRecoveryInterval); - RegionsRecoveryChore regionsRecoveryChore = new RegionsRecoveryChore(this.hMaster, - conf, this.hMaster); + RegionsRecoveryChore regionsRecoveryChore = + new RegionsRecoveryChore(this.hMaster, conf, this.hMaster); ChoreService choreService = this.hMaster.getChoreService(); // Regions Reopen based on very high storeFileRefCount is considered enabled // only if hbase.regions.recovery.store.file.ref.count has value > 0 - synchronized (this) { + if (chore != null) { + chore.shutdown(); + chore = null; + } if (newMaxStoreFileRefCount > 0) { - // reschedule the chore - // provide mayInterruptIfRunning - false to take care of completion - // of in progress task if any - choreService.cancelChore(regionsRecoveryChore, false); + // schedule the new chore choreService.scheduleChore(regionsRecoveryChore); - } else { - choreService.cancelChore(regionsRecoveryChore, false); + chore = regionsRecoveryChore; } this.prevMaxStoreFileRefCount = newMaxStoreFileRefCount; this.prevRegionsRecoveryInterval = newRegionsRecoveryInterval; @@ -86,15 +88,18 @@ public void onConfigurationChange(Configuration conf) { } private int getMaxStoreFileRefCount(Configuration configuration) { - return configuration.getInt( - HConstants.STORE_FILE_REF_COUNT_THRESHOLD, + return configuration.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD, HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD); } private int getRegionsRecoveryChoreInterval(Configuration configuration) { - return configuration.getInt( - 
HConstants.REGIONS_RECOVERY_INTERVAL, + return configuration.getInt(HConstants.REGIONS_RECOVERY_INTERVAL, HConstants.DEFAULT_REGIONS_RECOVERY_INTERVAL); } + @RestrictedApi(explanation = "Only visible for testing", link = "", + allowedOnPath = ".*/src/test/.*") + RegionsRecoveryChore getChore() { + return chore; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 8977174edba7..f91f04000cd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -936,7 +936,7 @@ public void startChore() { */ public void stop() { if (flushedSeqIdFlusher != null) { - flushedSeqIdFlusher.cancel(); + flushedSeqIdFlusher.shutdown(); } if (persistFlushedSequenceId) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index f628841cb4fc..186a8ff11bba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -456,7 +456,7 @@ public void stop() { choreService.shutdown(); } if (timeoutMonitor != null) { - timeoutMonitor.cancel(true); + timeoutMonitor.shutdown(true); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index bd1bff157cd4..f2d88bac527a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -102,7 +102,7 @@ public void start() throws IOException { public void stop(final String why) { if (refreshChore != null) { LOG.debug("Stopping QuotaRefresherChore chore."); - refreshChore.cancel(true); + refreshChore.shutdown(true); } stopped = true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java index 81e7e87603c0..282075b6d71b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java @@ -98,11 +98,11 @@ public synchronized void start() throws IOException { public synchronized void stop() { if (spaceQuotaRefresher != null) { - spaceQuotaRefresher.cancel(); + spaceQuotaRefresher.shutdown(); spaceQuotaRefresher = null; } if (regionSizeReporter != null) { - regionSizeReporter.cancel(); + regionSizeReporter.shutdown(); regionSizeReporter = null; } started = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index bcb143652203..e40e25158269 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -2642,6 +2642,11 @@ private void scheduleAbortTimer() { } } + protected final void shutdownChore(ScheduledChore chore) { + if (chore != null) { + chore.shutdown(); + } + } /** * Wait on all threads to finish. 
Presumption is that all closes and stops
    * have already been called.
@@ -2649,15 +2654,16 @@
   protected void stopServiceThreads() {
     // clean up the scheduled chores
     if (this.choreService != null) {
-      choreService.cancelChore(nonceManagerChore);
-      choreService.cancelChore(compactionChecker);
-      choreService.cancelChore(periodicFlusher);
-      choreService.cancelChore(healthCheckChore);
-      choreService.cancelChore(executorStatusChore);
-      choreService.cancelChore(storefileRefresher);
-      choreService.cancelChore(fsUtilizationChore);
-      choreService.cancelChore(slowLogTableOpsChore);
-      // clean up the remaining scheduled chores (in case we missed out any)
+      shutdownChore(nonceManagerChore);
+      shutdownChore(compactionChecker);
+      shutdownChore(periodicFlusher);
+      shutdownChore(healthCheckChore);
+      shutdownChore(executorStatusChore);
+      shutdownChore(storefileRefresher);
+      shutdownChore(fsUtilizationChore);
+      shutdownChore(slowLogTableOpsChore);
+      // cancel the remaining scheduled chores (in case we missed out any)
+      // TODO: cancel will not cleanup the chores, so we need to make sure we do not miss any
       choreService.shutdown();
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
index 1f831eefee62..342ec18e1ed9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
@@ -216,7 +216,7 @@ public void start(ChoreService service) {
   public void stop() {
     // The thread is Daemon. Just interrupting the ongoing process.
     LOG.info("Stopping");
-    this.heapMemTunerChore.cancel(true);
+    this.heapMemTunerChore.shutdown(true);
   }
 
   public void registerTuneObserver(HeapMemoryTuneObserver observer) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
index d29e061d07fd..6819e5d2b110 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
@@ -18,18 +18,18 @@
 package org.apache.hadoop.hbase.master;
 
-import java.io.IOException;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
-import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -38,7 +38,7 @@
 /**
  * Test for Regions Recovery Config Manager
  */
-@Category({MasterTests.class, MediumTests.class})
+@Category({ MasterTests.class, MediumTests.class })
 public class TestRegionsRecoveryConfigManager {
 
   @ClassRule
@@ -51,8 +51,6 @@ public class TestRegionsRecoveryConfigManager {
 
   private HMaster hMaster;
 
-  private RegionsRecoveryChore regionsRecoveryChore;
-
   private RegionsRecoveryConfigManager regionsRecoveryConfigManager;
 
   private 
Configuration conf; @@ -62,10 +60,8 @@ public void setup() throws Exception { conf = HBASE_TESTING_UTILITY.getConfiguration(); conf.unset("hbase.regions.recovery.store.file.ref.count"); conf.unset("hbase.master.regions.recovery.check.interval"); - StartMiniClusterOption option = StartMiniClusterOption.builder() - .masterClass(TestHMaster.class) - .numRegionServers(1) - .numDataNodes(1).build(); + StartMiniClusterOption option = StartMiniClusterOption.builder().masterClass(TestHMaster.class) + .numRegionServers(1).numDataNodes(1).build(); HBASE_TESTING_UTILITY.startMiniCluster(option); cluster = HBASE_TESTING_UTILITY.getMiniHBaseCluster(); } @@ -77,44 +73,44 @@ public void tearDown() throws Exception { @Test public void testChoreSchedule() throws Exception { - this.hMaster = cluster.getMaster(); - Stoppable stoppable = new StoppableImplementation(); - this.regionsRecoveryChore = new RegionsRecoveryChore(stoppable, conf, hMaster); - this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this.hMaster); // not yet scheduled - Assert.assertFalse(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertFalse( + hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // not yet scheduled - Assert.assertFalse(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertFalse( + hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.setInt("hbase.master.regions.recovery.check.interval", 10); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // not yet scheduled - missing config: hbase.regions.recovery.store.file.ref.count - Assert.assertFalse(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertFalse( + hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.setInt("hbase.regions.recovery.store.file.ref.count", 10); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // chore scheduled - Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.setInt("hbase.regions.recovery.store.file.ref.count", 20); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // chore re-scheduled - Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.setInt("hbase.regions.recovery.store.file.ref.count", 20); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // chore scheduling untouched - Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.unset("hbase.regions.recovery.store.file.ref.count"); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // chore un-scheduled - Assert.assertFalse(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertFalse( + hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); } // Make it public so that JVMClusterUtil can access it. @@ -123,24 +119,4 @@ public TestHMaster(Configuration conf) throws IOException { super(conf); } } - - /** - * Simple helper class that just keeps track of whether or not its stopped. 
- */ - private static class StoppableImplementation implements Stoppable { - - private boolean stop = false; - - @Override - public void stop(String why) { - this.stop = true; - } - - @Override - public boolean isStopped() { - return this.stop; - } - - } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java index 252276819cbe..b1fdf2676575 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java @@ -111,7 +111,7 @@ public void setup() throws IOException, KeeperException { @After public void teardown() { - this.janitor.cancel(true); + this.janitor.shutdown(true); this.masterServices.stop("DONE"); } From 5c13c684940ed6f7076b8f8240b33545058e4bdb Mon Sep 17 00:00:00 2001 From: huaxiangsun Date: Wed, 20 Jan 2021 09:04:50 -0800 Subject: [PATCH 341/769] =?UTF-8?q?HBASE-25368=20Filter=20out=20more=20inv?= =?UTF-8?q?alid=20encoded=20name=20in=20isEncodedRegionNa=E2=80=A6=20(#286?= =?UTF-8?q?8)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HBASE-25368 Filter out more invalid encoded name in isEncodedRegionName(byte[] regionName) Signed-off-by: Duo Zhang --- .../hbase/client/RawAsyncHBaseAdmin.java | 87 ++++++++++--------- .../hadoop/hbase/client/RegionInfo.java | 18 +++- .../hadoop/hbase/client/TestAdmin1.java | 19 ++++ .../hadoop/hbase/client/TestAdmin2.java | 8 +- 4 files changed, 85 insertions(+), 47 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 512e7a96aa6d..38bdddef1e5e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2388,51 +2388,56 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR if (regionNameOrEncodedRegionName == null) { return failedFuture(new IllegalArgumentException("Passed region name can't be null")); } - try { - CompletableFuture> future; - if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { - String encodedName = Bytes.toString(regionNameOrEncodedRegionName); - if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { - // old format encodedName, should be meta region - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); - } else { - future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, - regionNameOrEncodedRegionName); - } + + CompletableFuture> future; + if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { + String encodedName = Bytes.toString(regionNameOrEncodedRegionName); + if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { + // old format encodedName, should be meta region + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); } else { - RegionInfo regionInfo = - CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName); - if (regionInfo.isMetaRegion()) { - future = 
connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) - .findFirst()); - } else { - future = - ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); - } + future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, + regionNameOrEncodedRegionName); + } + } else { + // Not all regionNameOrEncodedRegionName here is going to be a valid region name, + // it needs to throw out IllegalArgumentException in case tableName is passed in. + RegionInfo regionInfo; + try { + regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName( + regionNameOrEncodedRegionName); + } catch (IOException ioe) { + return failedFuture(new IllegalArgumentException(ioe.getMessage())); } - CompletableFuture returnedFuture = new CompletableFuture<>(); - addListener(future, (location, err) -> { - if (err != null) { - returnedFuture.completeExceptionally(err); - return; - } - if (!location.isPresent() || location.get().getRegion() == null) { - returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); - } else { - returnedFuture.complete(location.get()); - } - }); - return returnedFuture; - } catch (IOException e) { - return failedFuture(e); + if (regionInfo.isMetaRegion()) { + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) + .findFirst()); + } else { + future = + ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); + } } + + CompletableFuture returnedFuture = new CompletableFuture<>(); + addListener(future, (location, err) -> { + if (err != null) { + returnedFuture.completeExceptionally(err); + return; + } + if (!location.isPresent() || location.get().getRegion() == null) { + returnedFuture.completeExceptionally( + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); + } else { + returnedFuture.complete(location.get()); + } + }); + return returnedFuture; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index d7460e9d15ef..b6bdd0103de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -363,7 +363,23 @@ static byte[] getStartKey(final byte[] regionName) throws IOException { @InterfaceAudience.Private // For use by internals only. public static boolean isEncodedRegionName(byte[] regionName) { // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex. - return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH; + if (parseRegionNameOrReturnNull(regionName) == null) { + if (regionName.length > MD5_HEX_LENGTH) { + return false; + } else if (regionName.length == MD5_HEX_LENGTH) { + return true; + } else { + String encodedName = Bytes.toString(regionName); + try { + Integer.parseInt(encodedName); + // If this is a valid integer, it could be hbase:meta's encoded region name. 
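+          // e.g. hbase:meta's single region predates the MD5-hex encoded-name format and
+          // keeps the legacy integer encoded name "1588230740"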
+ return true; + } catch(NumberFormatException er) { + return false; + } + } + } + return false; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index a0ed836f9c75..b48841660166 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -99,6 +100,24 @@ public void testSplitFlushCompactUnknownTable() throws InterruptedException { assertTrue(exception instanceof TableNotFoundException); } + @Test + public void testCompactATableWithSuperLongTableName() throws Exception { + TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + try { + ADMIN.createTable(htd); + assertThrows(IllegalArgumentException.class, + () -> ADMIN.majorCompactRegion(tableName.getName())); + + assertThrows(IllegalArgumentException.class, + () -> ADMIN.majorCompactRegion(Bytes.toBytes("abcd"))); + } finally { + ADMIN.disableTable(tableName); + ADMIN.deleteTable(tableName); + } + } + @Test public void testCompactionTimestamps() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 914152b58dec..b0271a006aca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -298,11 +298,9 @@ public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception { if (!regionInfo.isMetaRegion()) { if (regionInfo.getRegionNameAsString().contains(name)) { info = regionInfo; - try { - ADMIN.unassign(Bytes.toBytes("sample"), true); - } catch (UnknownRegionException nsre) { - // expected, ignore it - } + assertThrows(UnknownRegionException.class, + () -> ADMIN.unassign(Bytes.toBytes( + "test,,1358563771069.acc1ad1b7962564fc3a43e5907e8db33."), true)); } } } From f39636c05e057edbbc7b81268599c305dcee496a Mon Sep 17 00:00:00 2001 From: huaxiangsun Date: Fri, 22 Jan 2021 19:25:06 -0800 Subject: [PATCH 342/769] HBASE-25416 Add 2.3.4 to the downloads page (#2902) Signed-off-by: Nick Dimiduk --- src/site/xdoc/downloads.xml | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index fe6f3d8d198e..a4123ba5d559 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -68,26 +68,26 @@ under the License. - 2.3.3 + 2.3.4 - 2020/11/02 + 2021/01/22 - 2.3.2 vs 2.3.3 + 2.3.3 vs 2.3.4 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc) + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc) - + stable release @@ -110,7 +110,6 @@ under the License. bin (sha512 asc)
    client-bin (sha512 asc) - stable release From 0b0cdcac5352b12b907c32eb0fec6a7c65fe4904 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Sun, 24 Jan 2021 23:37:03 +0800 Subject: [PATCH 343/769] HBASE-25522 Remove deprecated methods in ReplicationPeerConfig (#2898) Signed-off-by: Viraj Jasani --- .../replication/ReplicationPeerConfig.java | 97 ----------------- .../client/TestAsyncReplicationAdminApi.java | 102 ++++++++---------- ...tAsyncReplicationAdminApiWithClusters.java | 27 ++--- .../hbase/client/TestReplicaWithCluster.java | 4 +- .../replication/TestMasterReplication.java | 10 +- .../TestMultiSlaveReplication.java | 11 +- .../replication/TestReplicationEndpoint.java | 60 ++++++----- .../replication/TestReplicationWithTags.java | 4 +- .../TestGlobalReplicationThrottler.java | 4 +- .../regionserver/TestReplicator.java | 9 +- .../security/access/TestAccessController.java | 3 +- ...bilityLabelReplicationWithExpAsString.java | 5 +- .../TestVisibilityLabelsReplication.java | 4 +- 13 files changed, 125 insertions(+), 215 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index bb3ff042ca06..5ca5cef9c4ed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -79,41 +79,6 @@ private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) { return Collections.unmodifiableMap(newTableCFsMap); } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder} to create new ReplicationPeerConfig. - */ - @Deprecated - public ReplicationPeerConfig() { - this.peerData = new TreeMap<>(Bytes.BYTES_COMPARATOR); - this.configuration = new HashMap<>(0); - this.serial = false; - } - - /** - * Set the clusterKey which is the concatenation of the slave cluster's: - * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setClusterKey(String)} instead. - */ - @Deprecated - public ReplicationPeerConfig setClusterKey(String clusterKey) { - this.clusterKey = clusterKey; - return this; - } - - /** - * Sets the ReplicationEndpoint plugin class for this peer. - * @param replicationEndpointImpl a class implementing ReplicationEndpoint - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setReplicationEndpointImpl(String)} instead. - */ - @Deprecated - public ReplicationPeerConfig setReplicationEndpointImpl(String replicationEndpointImpl) { - this.replicationEndpointImpl = replicationEndpointImpl; - return this; - } - public String getClusterKey() { return clusterKey; } @@ -134,88 +99,26 @@ public Map> getTableCFsMap() { return (Map>) tableCFsMap; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setTableCFsMap(Map)} instead. - */ - @Deprecated - public ReplicationPeerConfig setTableCFsMap(Map> tableCFsMap) { - this.tableCFsMap = tableCFsMap; - return this; - } - public Set getNamespaces() { return this.namespaces; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setNamespaces(Set)} instead. 
- */ - @Deprecated - public ReplicationPeerConfig setNamespaces(Set namespaces) { - this.namespaces = namespaces; - return this; - } - public long getBandwidth() { return this.bandwidth; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setBandwidth(long)} instead. - */ - @Deprecated - public ReplicationPeerConfig setBandwidth(long bandwidth) { - this.bandwidth = bandwidth; - return this; - } - public boolean replicateAllUserTables() { return this.replicateAllUserTables; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setReplicateAllUserTables(boolean)} instead. - */ - @Deprecated - public ReplicationPeerConfig setReplicateAllUserTables(boolean replicateAllUserTables) { - this.replicateAllUserTables = replicateAllUserTables; - return this; - } - public Map> getExcludeTableCFsMap() { return (Map>) excludeTableCFsMap; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setExcludeTableCFsMap(Map)} instead. - */ - @Deprecated - public ReplicationPeerConfig setExcludeTableCFsMap(Map> tableCFsMap) { - this.excludeTableCFsMap = tableCFsMap; - return this; - } - public Set getExcludeNamespaces() { return this.excludeNamespaces; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setExcludeNamespaces(Set)} instead. - */ - @Deprecated - public ReplicationPeerConfig setExcludeNamespaces(Set namespaces) { - this.excludeNamespaces = namespaces; - return this; - } - public String getRemoteWALDir() { return this.remoteWALDir; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java index 74b5c2fbd3c8..479fe6b35a43 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; @@ -108,10 +109,8 @@ public void clearPeerAndQueues() throws IOException, ReplicationException { @Test public void testAddRemovePeer() throws Exception { - ReplicationPeerConfig rpc1 = new ReplicationPeerConfig(); - rpc1.setClusterKey(KEY_ONE); - ReplicationPeerConfig rpc2 = new ReplicationPeerConfig(); - rpc2.setClusterKey(KEY_TWO); + ReplicationPeerConfig rpc1 = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(); + ReplicationPeerConfig rpc2 = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_TWO).build(); // Add a valid peer admin.addReplicationPeer(ID_ONE, rpc1).join(); // try adding the same (fails) @@ -142,10 +141,11 @@ public void testAddRemovePeer() throws Exception { @Test public void testPeerConfig() throws Exception { - ReplicationPeerConfig config = new ReplicationPeerConfig(); - config.setClusterKey(KEY_ONE); - config.getConfiguration().put("key1", "value1"); - 
config.getConfiguration().put("key2", "value2"); + ReplicationPeerConfig config = ReplicationPeerConfig.newBuilder() + .setClusterKey(KEY_ONE) + .putConfiguration("key1", "value1") + .putConfiguration("key2", "value2") + .build(); admin.addReplicationPeer(ID_ONE, config).join(); List peers = admin.listReplicationPeers().get(); @@ -160,8 +160,7 @@ public void testPeerConfig() throws Exception { @Test public void testEnableDisablePeer() throws Exception { - ReplicationPeerConfig rpc1 = new ReplicationPeerConfig(); - rpc1.setClusterKey(KEY_ONE); + ReplicationPeerConfig rpc1 = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(); admin.addReplicationPeer(ID_ONE, rpc1).join(); List peers = admin.listReplicationPeers().get(); assertEquals(1, peers.size()); @@ -176,8 +175,8 @@ public void testEnableDisablePeer() throws Exception { @Test public void testAppendPeerTableCFs() throws Exception { - ReplicationPeerConfig rpc1 = new ReplicationPeerConfig(); - rpc1.setClusterKey(KEY_ONE); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); final TableName tableName1 = TableName.valueOf(tableName.getNameAsString() + "t1"); final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "t2"); final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "t3"); @@ -186,9 +185,9 @@ public void testAppendPeerTableCFs() throws Exception { final TableName tableName6 = TableName.valueOf(tableName.getNameAsString() + "t6"); // Add a valid peer - admin.addReplicationPeer(ID_ONE, rpc1).join(); - rpc1.setReplicateAllUserTables(false); - admin.updateReplicationPeerConfig(ID_ONE, rpc1).join(); + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + rpcBuilder.setReplicateAllUserTables(false); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); Map> tableCFs = new HashMap<>(); @@ -280,16 +279,16 @@ public void testAppendPeerTableCFs() throws Exception { @Test public void testRemovePeerTableCFs() throws Exception { - ReplicationPeerConfig rpc1 = new ReplicationPeerConfig(); - rpc1.setClusterKey(KEY_ONE); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); final TableName tableName1 = TableName.valueOf(tableName.getNameAsString() + "t1"); final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "t2"); final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "t3"); final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "t4"); // Add a valid peer - admin.addReplicationPeer(ID_ONE, rpc1).join(); - rpc1.setReplicateAllUserTables(false); - admin.updateReplicationPeerConfig(ID_ONE, rpc1).join(); + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + rpcBuilder.setReplicateAllUserTables(false); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); Map> tableCFs = new HashMap<>(); try { @@ -369,30 +368,28 @@ public void testSetPeerNamespaces() throws Exception { String ns1 = "ns1"; String ns2 = "ns2"; - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(KEY_ONE); - admin.addReplicationPeer(ID_ONE, rpc).join(); - rpc.setReplicateAllUserTables(false); - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + rpcBuilder.setReplicateAllUserTables(false); + 
admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); // add ns1 and ns2 to peer config - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); Set namespaces = new HashSet<>(); namespaces.add(ns1); namespaces.add(ns2); - rpc.setNamespaces(namespaces); - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + rpcBuilder.setNamespaces(namespaces); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); namespaces = admin.getReplicationPeerConfig(ID_ONE).get().getNamespaces(); assertEquals(2, namespaces.size()); assertTrue(namespaces.contains(ns1)); assertTrue(namespaces.contains(ns2)); // update peer config only contains ns1 - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); namespaces = new HashSet<>(); namespaces.add(ns1); - rpc.setNamespaces(namespaces); - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + rpcBuilder.setNamespaces(namespaces); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); namespaces = admin.getReplicationPeerConfig(ID_ONE).get().getNamespaces(); assertEquals(1, namespaces.size()); assertTrue(namespaces.contains(ns1)); @@ -407,40 +404,36 @@ public void testNamespacesAndTableCfsConfigConflict() throws Exception { final TableName tableName1 = TableName.valueOf(ns1 + ":" + tableName.getNameAsString() + "1"); final TableName tableName2 = TableName.valueOf(ns2 + ":" + tableName.getNameAsString() + "2"); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(KEY_ONE); - admin.addReplicationPeer(ID_ONE, rpc).join(); - rpc.setReplicateAllUserTables(false); - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + rpcBuilder.setReplicateAllUserTables(false); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); Set namespaces = new HashSet(); namespaces.add(ns1); - rpc.setNamespaces(namespaces); - admin.updateReplicationPeerConfig(ID_ONE, rpc).get(); - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); + rpcBuilder.setNamespaces(namespaces); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).get(); Map> tableCfs = new HashMap<>(); tableCfs.put(tableName1, new ArrayList<>()); - rpc.setTableCFsMap(tableCfs); + rpcBuilder.setTableCFsMap(tableCfs); try { - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); fail( "Test case should fail, because table " + tableName1 + " conflict with namespace " + ns1); } catch (CompletionException e) { // OK } - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); tableCfs.clear(); tableCfs.put(tableName2, new ArrayList<>()); - rpc.setTableCFsMap(tableCfs); - admin.updateReplicationPeerConfig(ID_ONE, rpc).get(); - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); + rpcBuilder.setTableCFsMap(tableCfs); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).get(); namespaces.clear(); namespaces.add(ns2); - rpc.setNamespaces(namespaces); + rpcBuilder.setNamespaces(namespaces); try { - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); fail( "Test case should fail, because namespace " + ns2 + " conflict with table " + tableName2); } catch (CompletionException e) { @@ -452,15 +445,14 @@ public void testNamespacesAndTableCfsConfigConflict() throws Exception { 
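+  // Peer bandwidth is a bytes-per-second cap on replication shipping; the 2097152 used
+  // below is 2 MiB/s.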
@Test
  public void testPeerBandwidth() throws Exception {
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(KEY_ONE);
+    ReplicationPeerConfigBuilder rpcBuilder =
+      ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE);
 
-    admin.addReplicationPeer(ID_ONE, rpc).join();
-    rpc = admin.getReplicationPeerConfig(ID_ONE).get();
-    assertEquals(0, rpc.getBandwidth());
+    admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join();
+    assertEquals(0, admin.getReplicationPeerConfig(ID_ONE).get().getBandwidth());
 
-    rpc.setBandwidth(2097152);
-    admin.updateReplicationPeerConfig(ID_ONE, rpc).join();
+    rpcBuilder.setBandwidth(2097152);
+    admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join();
     assertEquals(2097152, admin.getReplicationPeerConfig(ID_ONE).join().getBandwidth());
 
     admin.removeReplicationPeer(ID_ONE).join();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java
index 1fb9df66abb0..c9599630ba76 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java
@@ -24,8 +24,8 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.ForkJoinPool;
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -86,8 +87,8 @@ public static void setUpBeforeClass() throws Exception {
       ConnectionFactory.createAsyncConnection(TEST_UTIL2.getConfiguration()).get();
     admin2 = connection.getAdmin();
 
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(TEST_UTIL2.getClusterKey());
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(TEST_UTIL2.getClusterKey()).build();
     ASYNC_CONN.getAdmin().addReplicationPeer(ID_SECOND, rpc).join();
   }
 
@@ -231,30 +232,30 @@ public void testEnableReplicationForExplicitSetTableCfs() throws Exception {
     assertFalse("Table should not exists in the peer cluster",
       admin2.tableExists(tableName2).get());
 
-    Map<TableName, Collection<String>> tableCfs = new HashMap<>();
+    Map<TableName, List<String>> tableCfs = new HashMap<>();
     tableCfs.put(tableName, null);
-    ReplicationPeerConfig rpc = admin.getReplicationPeerConfig(ID_SECOND).get();
-    rpc.setReplicateAllUserTables(false);
-    rpc.setTableCFsMap(tableCfs);
+    ReplicationPeerConfigBuilder rpcBuilder = ReplicationPeerConfig
+      .newBuilder(admin.getReplicationPeerConfig(ID_SECOND).get())
+      .setReplicateAllUserTables(false)
+      .setTableCFsMap(tableCfs);
     try {
       // Only add tableName to replication peer config
-      admin.updateReplicationPeerConfig(ID_SECOND, rpc).join();
+      admin.updateReplicationPeerConfig(ID_SECOND, rpcBuilder.build()).join();
       admin.enableTableReplication(tableName2).join();
       assertFalse("Table should not be created if user has set table cfs explicitly for the "
        + "peer and this is not part of that collection", 
admin2.tableExists(tableName2).get()); // Add tableName2 to replication peer config, too tableCfs.put(tableName2, null); - rpc.setTableCFsMap(tableCfs); - admin.updateReplicationPeerConfig(ID_SECOND, rpc).join(); + rpcBuilder.setTableCFsMap(tableCfs); + admin.updateReplicationPeerConfig(ID_SECOND, rpcBuilder.build()).join(); admin.enableTableReplication(tableName2).join(); assertTrue( "Table should be created if user has explicitly added table into table cfs collection", admin2.tableExists(tableName2).get()); } finally { - rpc.setTableCFsMap(null); - rpc.setReplicateAllUserTables(true); - admin.updateReplicationPeerConfig(ID_SECOND, rpc).join(); + rpcBuilder.setTableCFsMap(null).setReplicateAllUserTables(true).build(); + admin.updateReplicationPeerConfig(ID_SECOND, rpcBuilder.build()).join(); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index 491612c6be95..99180ec8bad1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -395,8 +395,8 @@ public void testReplicaAndReplication() throws Exception { try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); Admin admin = connection.getAdmin()) { - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(HTU2.getClusterKey()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(HTU2.getClusterKey()).build(); admin.addReplicationPeer("2", rpc); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index b2e0e6d4860e..9baa600ca10b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -623,7 +623,8 @@ private void addPeer(String id, int masterClusterNumber, try (Connection conn = ConnectionFactory.createConnection(configurations[masterClusterNumber]); Admin admin = conn.getAdmin()) { admin.addReplicationPeer(id, - new ReplicationPeerConfig().setClusterKey(utilities[slaveClusterNumber].getClusterKey())); + ReplicationPeerConfig.newBuilder(). 
+ setClusterKey(utilities[slaveClusterNumber].getClusterKey()).build()); } } @@ -633,9 +634,10 @@ private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber, Admin admin = conn.getAdmin()) { admin.addReplicationPeer( id, - new ReplicationPeerConfig().setClusterKey(utilities[slaveClusterNumber].getClusterKey()) - .setReplicateAllUserTables(false) - .setTableCFsMap(ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCfs))); + ReplicationPeerConfig.newBuilder() + .setClusterKey(utilities[slaveClusterNumber].getClusterKey()) + .setReplicateAllUserTables(false) + .setTableCFsMap(ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCfs)).build()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java index 322db2e9e3ba..b3e4a1f4f5f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java @@ -145,9 +145,9 @@ public void testMultiSlaveReplication() throws Exception { Table htable2 = utility2.getConnection().getTable(tableName); Table htable3 = utility3.getConnection().getTable(tableName); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(utility2.getClusterKey()); - admin1.addReplicationPeer("1", rpc); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()); + admin1.addReplicationPeer("1", rpcBuilder.build()); // put "row" and wait 'til it got around, then delete putAndWait(row, famName, htable1, htable2); @@ -163,9 +163,8 @@ public void testMultiSlaveReplication() throws Exception { // after the log was rolled put a new row putAndWait(row3, famName, htable1, htable2); - rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(utility3.getClusterKey()); - admin1.addReplicationPeer("2", rpc); + rpcBuilder.setClusterKey(utility3.getClusterKey()); + admin1.addReplicationPeer("2", rpcBuilder.build()); // put a row, check it was replicated to all clusters putAndWait(row1, famName, htable1, htable2, htable3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java index 5a6ac0c48745..b972c5f3cf3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java @@ -142,8 +142,10 @@ public String explainFailure() throws Exception { public void testCustomReplicationEndpoint() throws Exception { // test installing a custom replication endpoint other than the default one. 
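+    // (the default endpoint being replaced here is HBaseInterClusterReplicationEndpoint)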
hbaseAdmin.addReplicationPeer("testCustomReplicationEndpoint", - new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName())); + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()) + .build()); // check whether the class has been constructed and started Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { @@ -184,8 +186,10 @@ public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception { int peerCount = hbaseAdmin.listReplicationPeers().size(); final String id = "testReplicationEndpointReturnsFalseOnReplicate"; hbaseAdmin.addReplicationPeer(id, - new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName())); + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()) + .build()); // This test is flakey and then there is so much stuff flying around in here its, hard to // debug. Peer needs to be up for the edit to make it across. This wait on // peer count seems to be a hack that has us not progress till peer is up. @@ -236,8 +240,10 @@ public void testInterClusterReplication() throws Exception { } hbaseAdmin.addReplicationPeer(id, - new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2)) - .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName())); + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2)) + .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()) + .build()); final int numEdits = totEdits; Waiter.waitFor(CONF1, 30000, new Waiter.ExplainingPredicate() { @@ -260,13 +266,15 @@ public String explainFailure() throws Exception { @Test public void testWALEntryFilterFromReplicationEndpoint() throws Exception { - ReplicationPeerConfig rpc = - new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()); - // test that we can create mutliple WALFilters reflectively - rpc.getConfiguration().put(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, - EverythingPassesWALEntryFilter.class.getName() + "," + - EverythingPassesWALEntryFilterSubclass.class.getName()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) + // test that we can create mutliple WALFilters reflectively + .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, + EverythingPassesWALEntryFilter.class.getName() + "," + + EverythingPassesWALEntryFilterSubclass.class.getName()) + .build(); + hbaseAdmin.addReplicationPeer("testWALEntryFilterFromReplicationEndpoint", rpc); // now replicate some data. 
try (Connection connection = ConnectionFactory.createConnection(CONF1)) {
@@ -290,23 +298,25 @@ public boolean evaluate() throws Exception {
 
   @Test(expected = IOException.class)
   public void testWALEntryFilterAddValidation() throws Exception {
-    ReplicationPeerConfig rpc =
-      new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
-        .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName());
-    // test that we can create mutliple WALFilters reflectively
-    rpc.getConfiguration().put(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
-      "IAmNotARealWalEntryFilter");
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+      .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName())
+      // an invalid WALEntryFilter class name should make adding the peer fail validation
+      .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
+        "IAmNotARealWalEntryFilter")
+      .build();
     hbaseAdmin.addReplicationPeer("testWALEntryFilterAddValidation", rpc);
   }
 
   @Test(expected = IOException.class)
   public void testWALEntryFilterUpdateValidation() throws Exception {
-    ReplicationPeerConfig rpc =
-      new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
-        .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName());
-    // test that we can create mutliple WALFilters reflectively
-    rpc.getConfiguration().put(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
-      "IAmNotARealWalEntryFilter");
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+      .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName())
+      // an invalid WALEntryFilter class name should make the config update fail validation
+      .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
+        "IAmNotARealWalEntryFilter")
+      .build();
     hbaseAdmin.updateReplicationPeerConfig("testWALEntryFilterUpdateValidation", rpc);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index d416e09f2554..d61966f70a78 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -143,8 +143,8 @@ public static void setUpBeforeClass() throws Exception {
     connection1 = ConnectionFactory.createConnection(conf1);
     replicationAdmin = connection1.getAdmin();
 
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(utility2.getClusterKey());
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(utility2.getClusterKey()).build();
     replicationAdmin.addReplicationPeer("2", rpc);
 
     TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
index 1538fa360093..f528bdaad097 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
@@ -101,8 
+101,8 @@ public static void setUpBeforeClass() throws Exception { utility2.setZkCluster(miniZK); new ZKWatcher(conf2, "cluster2", null, true); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(utility2.getClusterKey()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(utility2.getClusterKey()).build(); utility1.startMiniCluster(); utility2.startMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java index bfdbb8864726..ce47f0b71481 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java @@ -71,8 +71,8 @@ public void testReplicatorBatching() throws Exception { // Replace the peer set up for us by the base class with a wrapper for this test hbaseAdmin.addReplicationPeer("testReplicatorBatching", - new ReplicationPeerConfig().setClusterKey(UTIL2.getClusterKey()) - .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName())); + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()).build()); ReplicationEndpointForTest.setBatchCount(0); ReplicationEndpointForTest.setEntriesCount(0); @@ -120,8 +120,9 @@ public void testReplicatorWithErrors() throws Exception { // Replace the peer set up for us by the base class with a wrapper for this test hbaseAdmin.addReplicationPeer("testReplicatorWithErrors", - new ReplicationPeerConfig().setClusterKey(UTIL2.getClusterKey()) - .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName())); + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName()) + .build()); FailureInjectingReplicationEndpointForTest.setBatchCount(0); FailureInjectingReplicationEndpointForTest.setEntriesCount(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 17276173ec70..905cb48fe77c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -3056,7 +3056,8 @@ public void testUpdateReplicationPeerConfig() throws Exception { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preUpdateReplicationPeerConfig( - ObserverContextImpl.createAndPrepare(CP_ENV), "test", new ReplicationPeerConfig()); + ObserverContextImpl.createAndPrepare(CP_ENV), "test", + ReplicationPeerConfig.newBuilder().build()); return null; } }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java index 10b8cf56106c..31f219c36c12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java @@ -137,8 
+137,9 @@ public void setup() throws Exception { TEST_UTIL1.startMiniCluster(1); admin = TEST_UTIL.getAdmin(); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(TEST_UTIL1.getClusterKey()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(TEST_UTIL1.getClusterKey()) + .build(); admin.addReplicationPeer("2", rpc); TableDescriptor tableDescriptor = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index 012c9aad5676..b843f6e3bff4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -190,8 +190,8 @@ public void setup() throws Exception { TEST_UTIL1.startMiniCluster(1); admin = TEST_UTIL.getAdmin(); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(TEST_UTIL1.getClusterKey()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(TEST_UTIL1.getClusterKey()).build(); admin.addReplicationPeer("2", rpc); Admin hBaseAdmin = TEST_UTIL.getAdmin(); From 4a66f68f9c6d5d7e25c70aa563137a40cc0c5cac Mon Sep 17 00:00:00 2001 From: Aman Poonia Date: Tue, 26 Jan 2021 11:55:12 +0530 Subject: [PATCH 344/769] HBASE-25523 Region normalizer chore thread is getting killed (#2903) Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani --- .../master/normalizer/SimpleRegionNormalizer.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 1675e049d77d..61bc922c5ac4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Size; import org.apache.hadoop.hbase.TableName; @@ -225,8 +226,16 @@ public List computePlansForTable(final TableName table) { private long getRegionSizeMB(RegionInfo hri) { ServerName sn = masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); - RegionMetrics regionLoad = - masterServices.getServerManager().getLoad(sn).getRegionMetrics().get(hri.getRegionName()); + if (sn == null) { + LOG.debug("{} region was not found on any Server", hri.getRegionNameAsString()); + return -1; + } + ServerMetrics serverMetrics = masterServices.getServerManager().getLoad(sn); + if (serverMetrics == null) { + LOG.debug("server {} was not found in ServerManager", sn.getServerName()); + return -1; + } + RegionMetrics regionLoad = serverMetrics.getRegionMetrics().get(hri.getRegionName()); if (regionLoad == null) { LOG.debug("{} was not found in RegionsLoad", hri.getRegionNameAsString()); return -1; From f9ef6633148dc98192cf73e8d53c9270c5dd4e4d Mon Sep 17 00:00:00 2001 From: Mallikarjun Date: Tue, 26 Jan 2021 12:08:55 +0530 Subject: [PATCH 
345/769] HBASE-25501 BugFix: Unused backup bandwidth and workers parameter (#2886) Signed-off-by: Viraj Jasani --- .../backup/impl/FullTableBackupClient.java | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index c0103f5db31f..5bf1373a6e53 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -25,6 +25,7 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -87,11 +88,21 @@ protected void snapshotCopy(BackupInfo backupInfo) throws Exception { // calculate the real files' size for the percentage in the future. // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); int res; - String[] args = new String[4]; - args[0] = "-snapshot"; - args[1] = backupInfo.getSnapshotName(table); - args[2] = "-copy-to"; - args[3] = backupInfo.getTableBackupDir(table); + ArrayList argsList = new ArrayList<>(); + argsList.add("-snapshot"); + argsList.add(backupInfo.getSnapshotName(table)); + argsList.add("-copy-to"); + argsList.add(backupInfo.getTableBackupDir(table)); + if (backupInfo.getBandwidth() > -1) { + argsList.add("-bandwidth"); + argsList.add(String.valueOf(backupInfo.getBandwidth())); + } + if (backupInfo.getWorkers() > -1) { + argsList.add("-mappers"); + argsList.add(String.valueOf(backupInfo.getWorkers())); + } + + String[] args = argsList.toArray(new String[0]); String jobname = "Full-Backup_" + backupInfo.getBackupId() + "_" + table.getNameAsString(); if (LOG.isDebugEnabled()) { From 14928d22e9b085adebc9b1193885daae4e9405df Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Tue, 26 Jan 2021 11:21:02 -0800 Subject: [PATCH 346/769] HBASE-25532 Add 2.4.1 to the downloads page Signed-off-by: Andrew Purtell --- src/site/xdoc/downloads.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index a4123ba5d559..72cbeb42e3e7 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -45,24 +45,24 @@ under the License. - 2.4.0 + 2.4.1 - 2020/12/15 + 2021/01/26 - 2.4.0 vs 2.3.0 + 2.4.1 vs 2.4.0 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc) + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc) From b07549febb462b072792659051c64bb54d122771 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Tue, 26 Jan 2021 16:14:53 -0800 Subject: [PATCH 347/769] HBASE-25531 Minor improvement to Profiler Servlet doc (#2905) Signed-off-by: Sean Busbey --- src/main/asciidoc/_chapters/profiler.adoc | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/main/asciidoc/_chapters/profiler.adoc b/src/main/asciidoc/_chapters/profiler.adoc index 522cc7deed6d..9c9911ce7519 100644 --- a/src/main/asciidoc/_chapters/profiler.adoc +++ b/src/main/asciidoc/_chapters/profiler.adoc @@ -29,20 +29,26 @@ == Background -HBASE-21926 introduced a new servlet that supports integrated profiling via async-profiler. +https://issues.apache.org/jira/browse/HBASE-21926[HBASE-21926] introduced a new servlet that +supports integrated, on-demand profiling via the +https://github.com/jvm-profiling-tools/async-profiler[Async Profiler] project. == Prerequisites -Go to https://github.com/jvm-profiling-tools/async-profiler, download a release appropriate for your platform, and install on every cluster host. -If 4.6 or later linux, be sure to set proc variables as per 'Basic Usage' section in the -Async Profiler Home Page -(Not doing this will draw you diagrams with no content). +Go to the https://github.com/jvm-profiling-tools/async-profiler[Async Profiler Home Page], download +a release appropriate for your platform, and install on every cluster host. If running a Linux +kernel v4.6 or later, be sure to set proc variables as per the +https://github.com/jvm-profiling-tools/async-profiler#basic-usage[Basic Usage] section. Not doing +so will result in flame graphs that contain no content. -Set `ASYNC_PROFILER_HOME` in the environment (put it in hbase-env.sh) to the root directory of the async-profiler install location, or pass it on the HBase daemon's command line as a system property as `-Dasync.profiler.home=/path/to/async-profiler`. +Set `ASYNC_PROFILER_HOME` in the environment (put it in hbase-env.sh) to the root directory of the +async-profiler install location, or pass it on the HBase daemon's command line as a system property +as `-Dasync.profiler.home=/path/to/async-profiler`. == Usage -Once the prerequisites are satisfied, access to async-profiler is available by way of the HBase UI or direct interaction with the infoserver. +Once the prerequisites are satisfied, access to async-profiler is available by way of the HBase UI +or direct interaction with the infoserver. 
Examples: From 4aff481318a6424ba89bdd44e6af1e702319ff20 Mon Sep 17 00:00:00 2001 From: Bo Cui Date: Thu, 28 Jan 2021 22:55:05 +0800 Subject: [PATCH 348/769] HBASE-25506 ServerManager#startChore affects MTTR of HMaster (#2889) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/master/ServerManager.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index f91f04000cd1..7bbfd0bb55d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -921,8 +921,13 @@ public boolean isClusterShutdown() { public void startChore() { Configuration c = master.getConfiguration(); if (persistFlushedSequenceId) { - // when reach here, RegionStates should loaded, firstly, we call remove deleted regions - removeDeletedRegionFromLoadedFlushedSequenceIds(); + new Thread(() -> { + // after AM#loadMeta, RegionStates should be loaded, and some regions are + // deleted by drop/split/merge during removeDeletedRegionFromLoadedFlushedSequenceIds, + // but these deleted regions are not added back to RegionStates, + // so we can safely remove deleted regions. + removeDeletedRegionFromLoadedFlushedSequenceIds(); + }, "RemoveDeletedRegionSyncThread").start(); int flushPeriod = c.getInt(FLUSHEDSEQUENCEID_FLUSHER_INTERVAL, FLUSHEDSEQUENCEID_FLUSHER_INTERVAL_DEFAULT); flushedSeqIdFlusher = new FlushedSequenceIdFlusher( From de51a40b53ec16d1847537211f4e9b1e2808ac66 Mon Sep 17 00:00:00 2001 From: Bharath Vissapragada Date: Thu, 28 Jan 2021 09:33:35 -0800 Subject: [PATCH 349/769] HBASE-25528: Dedicated merge dispatch threadpool on master (#2904) Adds "hbase.master.executor.merge.dispatch.threads" and defaults to 2. Also adds additional logging that includes the number of split plans and merge plans computed for each normalizer run. Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/HConstants.java | 7 +++++++ .../org/apache/hadoop/hbase/executor/EventType.java | 2 +- .../apache/hadoop/hbase/executor/ExecutorType.java | 1 + .../org/apache/hadoop/hbase/master/HMaster.java | 3 +++ .../master/normalizer/SimpleRegionNormalizer.java | 13 ++++++++++--- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index d31cadd85299..48fa00caaa14 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1605,6 +1605,13 @@ public enum OperationStatusCode { "hbase.master.executor.serverops.threads"; public static final int MASTER_SERVER_OPERATIONS_THREADS_DEFAULT = 5; + /** + * Number of threads used to dispatch merge operations to the regionservers. 
+ */ + public static final String MASTER_MERGE_DISPATCH_THREADS = + "hbase.master.executor.merge.dispatch.threads"; + public static final int MASTER_MERGE_DISPATCH_THREADS_DEFAULT = 2; + public static final String MASTER_META_SERVER_OPERATIONS_THREADS = "hbase.master.executor.meta.serverops.threads"; public static final int MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT = 5; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index a67447940b9d..600c96cc0267 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -152,7 +152,7 @@ public enum EventType { * C_M_MERGE_REGION
    * Client asking Master to merge regions. */ - C_M_MERGE_REGION (30, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_MERGE_REGION (30, ExecutorType.MASTER_MERGE_OPERATIONS), /** * Messages originating from Client to Master.
    * C_M_DELETE_TABLE
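A hedged sketch of what a dedicated dispatch pool like the one wired up in this patch looks like, written against plain java.util.concurrent rather than HBase's own ExecutorType/ExecutorService machinery; the config key and its default of 2 come from the HConstants hunk above, while the thread-name prefix is illustrative:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.atomic.AtomicInteger;
    import org.apache.hadoop.conf.Configuration;

    static ExecutorService newMergeDispatchPool(Configuration conf) {
      int threads = conf.getInt("hbase.master.executor.merge.dispatch.threads", 2);
      AtomicInteger seq = new AtomicInteger();
      // a fixed-size pool so merge dispatch cannot monopolize master-side threads
      return Executors.newFixedThreadPool(threads,
          r -> new Thread(r, "master-merge-dispatch-" + seq.getAndIncrement()));
    }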
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index d06bd54484d7..36958c518a68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -35,6 +35,7 @@ public enum ExecutorType { MASTER_META_SERVER_OPERATIONS (6), M_LOG_REPLAY_OPS (7), MASTER_SNAPSHOT_OPERATIONS (8), + MASTER_MERGE_OPERATIONS (9), // RegionServer executor services RS_OPEN_REGION (20), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 94f3bf2bfda7..9911f014d639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1324,6 +1324,9 @@ private void startServiceThreads() throws IOException { HConstants.MASTER_LOG_REPLAY_OPS_THREADS, HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT)); this.executorService.startExecutorService(ExecutorType.MASTER_SNAPSHOT_OPERATIONS, conf.getInt( SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT)); + this.executorService.startExecutorService(ExecutorType.MASTER_MERGE_OPERATIONS, conf.getInt( + HConstants.MASTER_MERGE_DISPATCH_THREADS, + HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT)); // We depend on there being only one instance of this executor running // at a time. To do concurrency, would need fencing of enable/disable of diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 61bc922c5ac4..52455686895f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -209,14 +209,21 @@ public List computePlansForTable(final TableName table) { ctx.getTableRegions().size()); final List plans = new ArrayList<>(); + int splitPlansCount = 0; if (proceedWithSplitPlanning) { - plans.addAll(computeSplitNormalizationPlans(ctx)); + List splitPlans = computeSplitNormalizationPlans(ctx); + splitPlansCount = splitPlans.size(); + plans.addAll(splitPlans); } + int mergePlansCount = 0; if (proceedWithMergePlanning) { - plans.addAll(computeMergeNormalizationPlans(ctx)); + List mergePlans = computeMergeNormalizationPlans(ctx); + mergePlansCount = mergePlans.size(); + plans.addAll(mergePlans); } - LOG.debug("Computed {} normalization plans for table {}", plans.size(), table); + LOG.debug("Computed normalization plans for table {}. 
Total plans: {}, split plans: {}, " + + "merge plans: {}", table, plans.size(), splitPlansCount, mergePlansCount); return plans; } From d234b4dec2d50c9184f3b4affaa68ec232445f29 Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Fri, 29 Jan 2021 04:17:30 -0800 Subject: [PATCH 350/769] [HBASE-25536] Remove 0 length wal file from logQueue if it belongs to old sources (#2908) Signed-off-by: Wellington Chevreuil Signed-off-by: Geoffrey Jacoby Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani --- .../ReplicationSourceWALReader.java | 4 ++- .../regionserver/TestWALEntryStream.java | 30 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index a6d87870b495..be262a6d9504 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -247,8 +247,10 @@ private void handleEmptyWALEntryBatch() throws InterruptedException { // (highly likely we've closed the current log), we've hit the max retries, and autorecovery is // enabled, then dump the log private void handleEofException(IOException e) { + // Dump the log even if logQueue size is 1 if the source is from recovered Source + // since we don't add current log to recovered source queue so it is safe to remove. if ((e instanceof EOFException || e.getCause() instanceof EOFException) && - logQueue.size() > 1 && this.eofAutoRecovery) { + (source.isRecovered() || logQueue.size() > 1) && this.eofAutoRecovery) { try { if (fs.getFileStatus(logQueue.peek()).getLen() == 0) { LOG.warn("Forcing removal of 0 length log in queue: " + logQueue.peek()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java index 63e7a8b90496..1db9c175e922 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java @@ -42,6 +42,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -652,4 +653,33 @@ public void testReadBeyondCommittedLength() throws IOException, InterruptedExcep assertFalse(entryStream.hasNext()); } } + + /* + Test removal of 0 length log from logQueue if the source is a recovered source and + size of logQueue is only 1. + */ + @Test + public void testEOFExceptionForRecoveredQueue() throws Exception { + PriorityBlockingQueue queue = new PriorityBlockingQueue<>(); + // Create a 0 length log. + Path emptyLog = new Path("emptyLog"); + FSDataOutputStream fsdos = fs.create(emptyLog); + fsdos.close(); + assertEquals(0, fs.getFileStatus(emptyLog).getLen()); + queue.add(emptyLog); + + Configuration conf = new Configuration(CONF); + // Override the max retries multiplier to fail fast. 
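// A sketch distilling the handleEofException change above (source, logQueue, fs and
// eofAutoRecovery are the fields shown in that hunk): a recovered source never has
// the live WAL at the head of its queue, so a zero-length head may be dropped even
// when it is the only entry, while a normal source must keep its current WAL, hence
// the logQueue.size() > 1 guard.
boolean mayDropEmptyHead = (source.isRecovered() || logQueue.size() > 1)
    && eofAutoRecovery && fs.getFileStatus(logQueue.peek()).getLen() == 0;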
+ conf.setInt("replication.source.maxretriesmultiplier", 1); + conf.setBoolean("replication.source.eof.autorecovery", true); + // Create a reader thread with source as recovered source. + ReplicationSource source = mockReplicationSource(true, conf); + when(source.isPeerEnabled()).thenReturn(true); + ReplicationSourceWALReader reader = + new ReplicationSourceWALReader(fs, conf, queue, 0, getDummyFilter(), source); + reader.run(); + // ReplicationSourceWALReaderThread#handleEofException method will + // remove empty log from logQueue. + assertEquals(0, queue.size()); + } } From e8775060ddb6c7fbf05d3d2bedc28110f127cf06 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Fri, 29 Jan 2021 23:27:31 +0800 Subject: [PATCH 351/769] HBASE-25533 The metadata of the table and family should not be an empty string (#2906) Signed-off-by: Viraj Jasani Signed-off-by: Geoffrey Jacoby --- .../client/ColumnFamilyDescriptorBuilder.java | 4 +- .../hbase/client/TableDescriptorBuilder.java | 6 +-- .../TestColumnFamilyDescriptorBuilder.java | 19 ++++++++ .../client/TestTableDescriptorBuilder.java | 18 +++++++ hbase-shell/src/test/ruby/hbase/admin_test.rb | 47 +++++++++++++++++++ 5 files changed, 89 insertions(+), 5 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 9a47cb52fa95..7afc3872b465 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -677,7 +677,7 @@ private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { * @return this (for chained invocation) */ private ModifyableColumnFamilyDescriptor setValue(Bytes key, Bytes value) { - if (value == null) { + if (value == null || value.getLength() == 0) { values.remove(key); } else { values.put(key, value); @@ -1228,7 +1228,7 @@ public Map getConfiguration() { * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setConfiguration(String key, String value) { - if (value == null) { + if (value == null || value.length() == 0) { configuration.remove(key); } else { configuration.put(key, value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index d98386817148..2581ccea758b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -701,7 +701,7 @@ public ModifyableTableDescriptor setValue(String key, String value) { toBytesOrNull(value, Bytes::toBytes)); } - /* + /** * @param key The key. * @param value The value. If null, removes the setting. */ @@ -710,14 +710,14 @@ private ModifyableTableDescriptor setValue(final Bytes key, return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } - /* + /** * Setter for storing metadata as a (key, value) pair in {@link #values} map * * @param key The key. * @param value The value. If null, removes the setting. 
*/ public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) { - if (value == null) { + if (value == null || value.getLength() == 0) { values.remove(key); } else { values.put(key, value); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index 557d2f8dfb6e..7528d24705cf 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; @@ -210,6 +211,24 @@ public void testDefaultBuilder() { KeepDeletedCells.FALSE.toString()); assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING), DataBlockEncoding.NONE.toString()); + } + @Test + public void testSetEmptyValue() { + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY); + String testConf = "TestConfiguration"; + String testValue = "TestValue"; + // test set value + builder.setValue(testValue, "2"); + assertEquals("2", Bytes.toString(builder.build().getValue(Bytes.toBytes(testValue)))); + builder.setValue(testValue, ""); + assertNull(builder.build().getValue(Bytes.toBytes(testValue))); + + // test set configuration + builder.setConfiguration(testConf, "1"); + assertEquals("1", builder.build().getConfigurationValue(testConf)); + builder.setConfiguration(testConf, ""); + assertNull(builder.build().getConfigurationValue(testConf)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 43824afe8107..05a0b31d1e8a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -369,4 +369,22 @@ public void testGetSetRegionServerGroup() { htd = TableDescriptorBuilder.newBuilder(htd).setRegionServerGroup(null).build(); assertNull(htd.getValue(RSGroupInfo.TABLE_DESC_PROP_GROUP)); } + + @Test + public void testSetEmptyValue() { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + String testValue = "TestValue"; + // test setValue + builder.setValue(testValue, "2"); + assertEquals("2", builder.build().getValue(testValue)); + builder.setValue(testValue, ""); + assertNull(builder.build().getValue(Bytes.toBytes(testValue))); + + // test setFlushPolicyClassName + builder.setFlushPolicyClassName("class"); + assertEquals("class", builder.build().getFlushPolicyClassName()); + builder.setFlushPolicyClassName(""); + assertNull(builder.build().getFlushPolicyClassName()); + } } diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index 64a4a8b425c6..309624ae1808 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -1013,6 +1013,21 @@ def teardown assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) end + define_test "alter should be able to remove a list of 
table attributes when value is empty" do + drop_test_table(@test_name) + + key_1 = "TestAttr1" + key_2 = "TestAttr2" + command(:create, @test_name, { NAME => 'i'}, METADATA => { key_1 => 1, key_2 => 2 }) + + # eval() is used to convert a string to regex + assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + + command(:alter, @test_name, METADATA => { key_1 => '', key_2 => '' }) + assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + end define_test "alter should raise error trying to remove nonexistent attributes" do drop_test_table(@test_name) @@ -1064,6 +1079,38 @@ def teardown assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) end + define_test "alter should be able to remove a list of table configuration when value is empty" do + drop_test_table(@test_name) + + key_1 = "TestConf1" + key_2 = "TestConf2" + command(:create, @test_name, { NAME => 'i'}, CONFIGURATION => { key_1 => 1, key_2 => 2 }) + + # eval() is used to convert a string to regex + assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + + command(:alter, @test_name, CONFIGURATION => { key_1 => '', key_2 => '' }) + assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + end + + define_test "alter should be able to remove a list of column family configuration when value is empty" do + drop_test_table(@test_name) + + key_1 = "TestConf1" + key_2 = "TestConf2" + command(:create, @test_name, { NAME => 'i', CONFIGURATION => { key_1 => 1, key_2 => 2 }}) + + # eval() is used to convert a string to regex + assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + + command(:alter, @test_name, { NAME => 'i', CONFIGURATION => { key_1 => '', key_2 => '' }}) + assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + end + define_test "alter should raise error trying to remove nonexistent configurations" do drop_test_table(@test_name) create_test_table(@test_name) From 1765ff7f7689da6535325eb1fe3f2f0d55aa4492 Mon Sep 17 00:00:00 2001 From: SWH12 <34267571+SWH12@users.noreply.github.com> Date: Sun, 31 Jan 2021 06:13:45 +0800 Subject: [PATCH 352/769] HBASE-25498 Add a comment when configuring HTTPS (#2913) Incomplete configuration steps when using Secure HTTP (HTTPS) for the Web UI --- src/main/asciidoc/_chapters/security.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc index 7953b75b6a37..4678072145b1 100644 --- a/src/main/asciidoc/_chapters/security.adoc +++ b/src/main/asciidoc/_chapters/security.adoc @@ -44,7 +44,7 @@ HBase provides mechanisms to secure various components and aspects of HBase and === Using Secure HTTP (HTTPS) for the Web UI A default HBase install uses insecure HTTP connections for Web UIs for the master and region servers. -To enable secure HTTP (HTTPS) connections instead, set `hbase.ssl.enabled` to `true` in _hbase-site.xml_. +To enable secure HTTP (HTTPS) connections instead, set `hbase.ssl.enabled` to `true` in _hbase-site.xml_(Please prepare SSL certificate and ssl configuration file in advance). 
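For illustration, a sketch of the equivalent programmatic toggle, useful mainly for embedded or test clusters; production deployments should set the property in hbase-site.xml as this section describes (HBaseConfiguration and the property name are standard, the rest is illustrative):

    org.apache.hadoop.conf.Configuration conf =
        org.apache.hadoop.hbase.HBaseConfiguration.create();
    conf.setBoolean("hbase.ssl.enabled", true); // serve the master/RS Web UIs over HTTPS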
This does not change the port used by the Web UI. To change the port for the web UI for a given HBase component, configure that port's setting in hbase-site.xml. These settings are: From a04ea7ea4493f5bc583b4d08a2a6a88e7c6b8c54 Mon Sep 17 00:00:00 2001 From: Pankaj Date: Mon, 1 Feb 2021 10:37:57 +0530 Subject: [PATCH 353/769] HBASE-24900 Make retain assignment configurable during SCP (#2313) Retain assignment will be useful in non-cloud scenario where RegionServer and Datanode are deployed in same machine and will avoid remote read. Signed-off-by: Guanghao Zhang Signed-off-by: Anoop Sam John --- .../TransitRegionStateProcedure.java | 9 +- .../procedure/ServerCrashProcedure.java | 23 +- .../master/TestRetainAssignmentOnRestart.java | 240 ++++++++++++++++++ ...tainAssignmentOnRestartSplitWithoutZk.java | 38 +++ 4 files changed, 300 insertions(+), 10 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java index 56e3215d3ab2..8ca1ee482e81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java @@ -414,13 +414,8 @@ public void reportTransition(MasterProcedureEnv env, RegionStateNode regionNode, // Should be called with RegionStateNode locked public void serverCrashed(MasterProcedureEnv env, RegionStateNode regionNode, - ServerName serverName) throws IOException { - // force to assign to a new candidate server - // AssignmentManager#regionClosedAbnormally will set region location to null - // TODO: the forceNewPlan flag not be persistent so if master crash then the flag will be lost. - // But assign to old server is not big deal because it not effect correctness. - // See HBASE-23035 for more details. - forceNewPlan = true; + ServerName serverName, boolean forceNewPlan) throws IOException { + this.forceNewPlan = forceNewPlan; if (remoteProc != null) { // this means we are waiting for the sub procedure, so wake it up remoteProc.serverCrashed(env, regionNode, serverName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 655974489f68..e7fba555c9cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -65,6 +65,21 @@ public class ServerCrashProcedure implements ServerProcedureInterface { private static final Logger LOG = LoggerFactory.getLogger(ServerCrashProcedure.class); + /** + * Configuration parameter to enable/disable the retain region assignment during + * ServerCrashProcedure. + *

+ * By default retain assignment is disabled, which makes failover faster and improves availability; this is useful for cloud scenarios where region block locality is not important. Enable it when RegionServers are deployed on the same hosts where DataNodes are running; this will improve read performance due to local reads. + *

    + * see HBASE-24900 for more details. + */ + public static final String MASTER_SCP_RETAIN_ASSIGNMENT = "hbase.master.scp.retain.assignment"; + /** Default value of {@link #MASTER_SCP_RETAIN_ASSIGNMENT} */ + public static final boolean DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT = false; + /** * Name of the crashed server to process. */ @@ -486,6 +501,8 @@ protected boolean isMatchingRegionLocation(RegionStateNode rsn) { */ private void assignRegions(MasterProcedureEnv env, List regions) throws IOException { AssignmentManager am = env.getMasterServices().getAssignmentManager(); + boolean retainAssignment = env.getMasterConfiguration().getBoolean(MASTER_SCP_RETAIN_ASSIGNMENT, + DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT); for (RegionInfo region : regions) { RegionStateNode regionNode = am.getRegionStates().getOrCreateRegionStateNode(region); regionNode.lock(); @@ -512,7 +529,8 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr } if (regionNode.getProcedure() != null) { LOG.info("{} found RIT {}; {}", this, regionNode.getProcedure(), regionNode); - regionNode.getProcedure().serverCrashed(env, regionNode, getServerName()); + regionNode.getProcedure().serverCrashed(env, regionNode, getServerName(), + !retainAssignment); continue; } if (env.getMasterServices().getTableStateManager() @@ -531,9 +549,8 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr LOG.warn("Found table disabled for region {}, procDetails: {}", regionNode, this); continue; } - // force to assign to a new candidate server, see HBASE-23035 for more details. TransitRegionStateProcedure proc = - TransitRegionStateProcedure.assign(env, region, true, null); + TransitRegionStateProcedure.assign(env, region, !retainAssignment, null); regionNode.setProcedure(proc); addChildProcedure(proc); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java new file mode 100644 index 000000000000..ee4da743d215 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java @@ -0,0 +1,240 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestRetainAssignmentOnRestart extends AbstractTestRestartCluster { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRetainAssignmentOnRestart.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestRetainAssignmentOnRestart.class); + + private static int NUM_OF_RS = 3; + + @Override + protected boolean splitWALCoordinatedByZk() { + return true; + } + + /** + * This tests retaining assignments on a cluster restart + */ + @Test + public void testRetainAssignmentOnClusterRestart() throws Exception { + setupCluster(); + HMaster master = UTIL.getMiniHBaseCluster().getMaster(); + MiniHBaseCluster cluster = UTIL.getHBaseCluster(); + List threads = cluster.getLiveRegionServerThreads(); + assertEquals(NUM_OF_RS, threads.size()); + int[] rsPorts = new int[NUM_OF_RS]; + for (int i = 0; i < NUM_OF_RS; i++) { + rsPorts[i] = threads.get(i).getRegionServer().getServerName().getPort(); + } + + // We don't have to use SnapshotOfRegionAssignmentFromMeta. 
We use it here because AM used to + // use it to load all user region placements + SnapshotOfRegionAssignmentFromMeta snapshot = + new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); + snapshot.initialize(); + Map regionToRegionServerMap = snapshot.getRegionToRegionServerMap(); + for (ServerName serverName : regionToRegionServerMap.values()) { + boolean found = false; // Test only, no need to optimize + for (int k = 0; k < NUM_OF_RS && !found; k++) { + found = serverName.getPort() == rsPorts[k]; + } + assertTrue(found); + } + + LOG.info("\n\nShutting down HBase cluster"); + cluster.stopMaster(0); + cluster.shutdown(); + cluster.waitUntilShutDown(); + + LOG.info("\n\nSleeping a bit"); + Thread.sleep(2000); + + LOG.info("\n\nStarting cluster the second time with the same ports"); + cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 3); + master = cluster.startMaster().getMaster(); + for (int i = 0; i < NUM_OF_RS; i++) { + cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, rsPorts[i]); + cluster.startRegionServer(); + } + + ensureServersWithSamePort(master, rsPorts); + + // Wait till master is initialized and all regions are assigned + for (TableName TABLE : TABLES) { + UTIL.waitTableAvailable(TABLE); + } + UTIL.waitUntilNoRegionsInTransition(60000); + + snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); + snapshot.initialize(); + Map newRegionToRegionServerMap = snapshot.getRegionToRegionServerMap(); + assertEquals(regionToRegionServerMap.size(), newRegionToRegionServerMap.size()); + for (Map.Entry entry : newRegionToRegionServerMap.entrySet()) { + ServerName oldServer = regionToRegionServerMap.get(entry.getKey()); + ServerName currentServer = entry.getValue(); + LOG.info( + "Key=" + entry.getKey() + " oldServer=" + oldServer + ", currentServer=" + currentServer); + assertEquals(entry.getKey().toString(), oldServer.getAddress(), currentServer.getAddress()); + assertNotEquals(oldServer.getStartcode(), currentServer.getStartcode()); + } + } + + /** + * This tests retaining assignments on a single node restart + */ + @Test + public void testRetainAssignmentOnSingleRSRestart() throws Exception { + setupCluster(); + HMaster master = UTIL.getMiniHBaseCluster().getMaster(); + MiniHBaseCluster cluster = UTIL.getHBaseCluster(); + List threads = cluster.getLiveRegionServerThreads(); + assertEquals(NUM_OF_RS, threads.size()); + int[] rsPorts = new int[NUM_OF_RS]; + for (int i = 0; i < NUM_OF_RS; i++) { + rsPorts[i] = threads.get(i).getRegionServer().getServerName().getPort(); + } + + // We don't have to use SnapshotOfRegionAssignmentFromMeta. 
We use it here because AM used to + // use it to load all user region placements + SnapshotOfRegionAssignmentFromMeta snapshot = + new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); + snapshot.initialize(); + Map regionToRegionServerMap = snapshot.getRegionToRegionServerMap(); + for (ServerName serverName : regionToRegionServerMap.values()) { + boolean found = false; // Test only, no need to optimize + for (int k = 0; k < NUM_OF_RS && !found; k++) { + found = serverName.getPort() == rsPorts[k]; + } + assertTrue(found); + } + + // Server to be restarted + ServerName deadRS = threads.get(0).getRegionServer().getServerName(); + LOG.info("\n\nStopping HMaster and {} server", deadRS); + // Stopping master first so that region server SCP will not be initiated + cluster.stopMaster(0); + cluster.waitForMasterToStop(master.getServerName(), 5000); + cluster.stopRegionServer(deadRS); + + LOG.info("\n\nSleeping a bit"); + Thread.sleep(2000); + + LOG.info("\n\nStarting HMaster and region server {} second time with the same port", deadRS); + cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 3); + master = cluster.startMaster().getMaster(); + cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, deadRS.getPort()); + cluster.startRegionServer(); + + ensureServersWithSamePort(master, rsPorts); + + // Wait till master is initialized and all regions are assigned + for (TableName TABLE : TABLES) { + UTIL.waitTableAvailable(TABLE); + } + UTIL.waitUntilNoRegionsInTransition(60000); + + snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); + snapshot.initialize(); + Map newRegionToRegionServerMap = snapshot.getRegionToRegionServerMap(); + assertEquals(regionToRegionServerMap.size(), newRegionToRegionServerMap.size()); + for (Map.Entry entry : newRegionToRegionServerMap.entrySet()) { + ServerName oldServer = regionToRegionServerMap.get(entry.getKey()); + ServerName currentServer = entry.getValue(); + LOG.info( + "Key=" + entry.getKey() + " oldServer=" + oldServer + ", currentServer=" + currentServer); + assertEquals(entry.getKey().toString(), oldServer.getAddress(), currentServer.getAddress()); + + if (deadRS.getPort() == oldServer.getPort()) { + // Restarted RS start code wont be same + assertNotEquals(oldServer.getStartcode(), currentServer.getStartcode()); + } else { + assertEquals(oldServer.getStartcode(), currentServer.getStartcode()); + } + } + } + + private void setupCluster() throws Exception, IOException, InterruptedException { + // Set Zookeeper based connection registry since we will stop master and start a new master + // without populating the underlying config for the connection. 
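// A sketch of the decision the ServerCrashProcedure hunks above introduce; "retain
// assignment" simply means "do not force a new plan", so the region can be
// re-opened on a server at the old host:port once it re-registers (the names below
// are taken from those hunks):
boolean retain = env.getMasterConfiguration().getBoolean(
    ServerCrashProcedure.MASTER_SCP_RETAIN_ASSIGNMENT,
    ServerCrashProcedure.DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT);
TransitRegionStateProcedure proc =
    TransitRegionStateProcedure.assign(env, region, !retain, null); // forceNewPlan = !retain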
+ UTIL.getConfiguration().set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + HConstants.ZK_CONNECTION_REGISTRY_CLASS); + // Enable retain assignment during ServerCrashProcedure + UTIL.getConfiguration().setBoolean(ServerCrashProcedure.MASTER_SCP_RETAIN_ASSIGNMENT, true); + UTIL.startMiniCluster(NUM_OF_RS); + + // Turn off balancer + UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false); + + LOG.info("\n\nCreating tables"); + for (TableName TABLE : TABLES) { + UTIL.createTable(TABLE, FAMILY); + } + for (TableName TABLE : TABLES) { + UTIL.waitTableEnabled(TABLE); + } + + UTIL.getMiniHBaseCluster().getMaster(); + UTIL.waitUntilNoRegionsInTransition(60000); + } + + private void ensureServersWithSamePort(HMaster master, int[] rsPorts) { + // Make sure live regionservers are on the same host/port + List localServers = master.getServerManager().getOnlineServersList(); + assertEquals(NUM_OF_RS, localServers.size()); + for (int i = 0; i < NUM_OF_RS; i++) { + boolean found = false; + for (ServerName serverName : localServers) { + if (serverName.getPort() == rsPorts[i]) { + found = true; + break; + } + } + assertTrue(found); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java new file mode 100644 index 000000000000..2cbb2c7e2f63 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestRetainAssignmentOnRestartSplitWithoutZk + extends TestRetainAssignmentOnRestart { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRetainAssignmentOnRestartSplitWithoutZk.class); + + @Override + protected boolean splitWALCoordinatedByZk() { + return false; + } +} From d6d67d1e9f61bb231130a9cd453d86594841da9c Mon Sep 17 00:00:00 2001 From: robin7roy <57340134+robin7roy@users.noreply.github.com> Date: Wed, 3 Feb 2021 10:34:34 +0530 Subject: [PATCH 354/769] HBASE-25546 PerfTestCompactionPolicies is failing because of NPE (#2921) Adds mock for getRegionInfo which was missing earlier. 
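A minimal sketch of the fix pattern: every collaborator method the code under test touches must be stubbed on the mock, and this change covers the getRegionInfo() call that the existing mock did not answer (mock/when/thenReturn are standard Mockito; the stubbed return value mirrors the hunk below):

    HStore store = mock(HStore.class);
    when(store.getStoreFileTtl()).thenReturn(Long.MAX_VALUE);
    // the missing stub; without it the compaction policy dereferences null
    when(store.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);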
Signed-off-by: Pankaj Kumar --- .../regionserver/compactions/PerfTestCompactionPolicies.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index 13c7a6bc1039..0c84507c9568 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; @@ -197,6 +198,7 @@ private HStore createMockStore() { HStore s = mock(HStore.class); when(s.getStoreFileTtl()).thenReturn(Long.MAX_VALUE); when(s.getBlockingFileCount()).thenReturn(7L); + when(s.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); return s; } From 1e8ff16eae5a5e00df7d0d9694e924800aac2af4 Mon Sep 17 00:00:00 2001 From: robin7roy <57340134+robin7roy@users.noreply.github.com> Date: Thu, 4 Feb 2021 20:55:40 +0530 Subject: [PATCH 355/769] HBASE-25546 PerfTestCompactionPolicies is failing because of NPE (addendum) (#2926) Added HBaseClassTestRule for PerfTestCompactionPolicies --- .../compactions/PerfTestCompactionPolicies.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index 0c84507c9568..e6b0499c738d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -28,6 +28,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.HStore; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -45,6 +47,10 @@ @RunWith(Parameterized.class) public class PerfTestCompactionPolicies extends MockStoreFileGenerator { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(PerfTestCompactionPolicies.class); + private final RatioBasedCompactionPolicy cp; private final StoreFileListGenerator generator; private final HStore store; From 3a690931f50521f6a88a20c733ac2fd9a2f2a321 Mon Sep 17 00:00:00 2001 From: stack Date: Thu, 4 Feb 2021 11:27:06 -0800 Subject: [PATCH 356/769] HBASE-25546 PerfTestCompactionPolicies is failing because of NPE (addendum2); add class comment --- .../regionserver/compactions/PerfTestCompactionPolicies.java | 5 +++++ 1 file changed, 5 insertions(+) diff 
--git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index e6b0499c738d..314b96695b6d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -43,6 +43,11 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +/** + * This is not a unit test. It is not run as part of the general unit test suite. It is for + * comparing compaction policies. You must run it explicitly; + * e.g. mvn test -Dtest=PerfTestCompactionPolicies + */ @Category({RegionServerTests.class, MediumTests.class}) @RunWith(Parameterized.class) public class PerfTestCompactionPolicies extends MockStoreFileGenerator { From 79057497331f6f381e6dc46a81f43f3f6b5ae4a2 Mon Sep 17 00:00:00 2001 From: YutSean <33572832+YutSean@users.noreply.github.com> Date: Fri, 5 Feb 2021 15:37:34 +0800 Subject: [PATCH 357/769] HBASE-25543 When configuration hadoop.security.authorization is set to false, the system will still try to authorize an RPC and raise AccessDeniedException (#2919) Signed-off-by: Viraj Jasani Signed-off-by: Reid Chan --- .../src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java | 3 +++ .../java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index ca8593ee3d5d..b0e8b7d3d5d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -822,4 +822,7 @@ public void setNamedQueueRecorder(NamedQueueRecorder namedQueueRecorder) { this.namedQueueRecorder = namedQueueRecorder; } + protected boolean needAuthorization() { + return authorize; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 0226de4792c9..422003e1a6a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -449,7 +449,7 @@ public void processOneRpc(ByteBuff buf) throws IOException, } else { processConnectionHeader(buf); this.connectionHeaderRead = true; - if (!authorizeConnection()) { + if (rpcServer.needAuthorization() && !authorizeConnection()) { // Throw FatalConnectionException wrapping ACE so client does right thing and closes // down the connection instead of trying to read non-existent retun. 
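// needAuthorization() added above just exposes RpcServer's existing 'authorize'
// flag, which is driven by the standard Hadoop switch; a sketch of where that flag
// comes from (the key and its false default are Hadoop's, the rest is illustrative):
boolean authorize = conf.getBoolean("hadoop.security.authorization", false);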
throw new AccessDeniedException("Connection from " + this + " for service " + From 304379605a937fbeb0f63dfc78ec0886e2faa90c Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Fri, 5 Feb 2021 16:31:53 +0800 Subject: [PATCH 358/769] HBASE-25554 NPE when init RegionMover (#2927) Signed-off-by: Viraj Jasani --- .../main/java/org/apache/hadoop/hbase/util/RegionMover.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 210e9e17a39f..778d66da63d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -87,7 +87,6 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { public static final int DEFAULT_MOVE_RETRIES_MAX = 5; public static final int DEFAULT_MOVE_WAIT_MAX = 60; public static final int DEFAULT_SERVERSTART_WAIT_MAX = 180; - private final RackManager rackManager; private static final Logger LOG = LoggerFactory.getLogger(RegionMover.class); @@ -103,6 +102,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { private int port; private Connection conn; private Admin admin; + private RackManager rackManager; private RegionMover(RegionMoverBuilder builder) throws IOException { this.hostname = builder.hostname; @@ -125,7 +125,6 @@ private RegionMover(RegionMoverBuilder builder) throws IOException { } private RegionMover() { - rackManager = new RackManager(conf); } @Override From d6aff6cbae5157b99c3e1c83472c7d3243a131db Mon Sep 17 00:00:00 2001 From: XinSun Date: Sun, 7 Feb 2021 17:13:47 +0800 Subject: [PATCH 359/769] HBASE-25553 It is better for ReplicationTracker.getListOfRegionServers to return ServerName instead of String (#2928) Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani --- .../hbase/replication/ReplicationTracker.java | 7 ++++--- .../replication/ReplicationTrackerZKImpl.java | 16 ++++++++++------ .../regionserver/DumpReplicationQueues.java | 4 ++-- .../regionserver/ReplicationSourceManager.java | 3 +-- .../TestReplicationTrackerZKImpl.java | 18 +++++++++--------- 5 files changed, 26 insertions(+), 22 deletions(-) diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java index 93a32630d559..a33e23dc96b8 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java @@ -20,6 +20,7 @@ import java.util.List; +import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; /** @@ -37,13 +38,13 @@ public interface ReplicationTracker { * Register a replication listener to receive replication events. * @param listener the listener to register */ - public void registerListener(ReplicationListener listener); + void registerListener(ReplicationListener listener); - public void removeListener(ReplicationListener listener); + void removeListener(ReplicationListener listener); /** * Returns a list of other live region servers in the cluster. * @return List of region servers. 
*/ - public List getListOfRegionServers(); + List getListOfRegionServers(); } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java index 54c9c2cdc0af..6fc3c452723d 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java @@ -20,7 +20,10 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.stream.Collectors; + import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -49,7 +52,7 @@ public class ReplicationTrackerZKImpl implements ReplicationTracker { // listeners to be notified private final List listeners = new CopyOnWriteArrayList<>(); // List of all the other region servers in this cluster - private final ArrayList otherRegionServers = new ArrayList<>(); + private final List otherRegionServers = new ArrayList<>(); public ReplicationTrackerZKImpl(ZKWatcher zookeeper, Abortable abortable, Stoppable stopper) { this.zookeeper = zookeeper; @@ -74,10 +77,10 @@ public void removeListener(ReplicationListener listener) { * Return a snapshot of the current region servers. */ @Override - public List getListOfRegionServers() { + public List getListOfRegionServers() { refreshOtherRegionServersList(false); - List list = null; + List list = null; synchronized (otherRegionServers) { list = new ArrayList<>(otherRegionServers); } @@ -162,7 +165,7 @@ private String getZNodeName(String fullPath) { * if it was empty), false if the data was missing in ZK */ private boolean refreshOtherRegionServersList(boolean watch) { - List newRsList = getRegisteredRegionServers(watch); + List newRsList = getRegisteredRegionServers(watch); if (newRsList == null) { return false; } else { @@ -178,7 +181,7 @@ private boolean refreshOtherRegionServersList(boolean watch) { * Get a list of all the other region servers in this cluster and set a watch * @return a list of server nanes */ - private List getRegisteredRegionServers(boolean watch) { + private List getRegisteredRegionServers(boolean watch) { List result = null; try { if (watch) { @@ -190,6 +193,7 @@ private List getRegisteredRegionServers(boolean watch) { } catch (KeeperException e) { this.abortable.abort("Get list of registered region servers", e); } - return result; + return result == null ? 
null : + result.stream().map(ServerName::parseServerName).collect(Collectors.toList()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index cc0d9bbaa2e7..92c57a89d6be 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -308,7 +308,7 @@ public String dumpQueues(ZKWatcher zkw, Set peerIds, queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); replicationTracker = ReplicationFactory.getReplicationTracker(zkw, new WarnOnlyAbortable(), new WarnOnlyStoppable()); - Set liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers()); + Set liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers()); // Loops each peer on each RS and dumps the queues List regionservers = queueStorage.getListOfReplicators(); @@ -317,7 +317,7 @@ public String dumpQueues(ZKWatcher zkw, Set peerIds, } for (ServerName regionserver : regionservers) { List queueIds = queueStorage.getAllQueues(regionserver); - if (!liveRegionServers.contains(regionserver.getServerName())) { + if (!liveRegionServers.contains(regionserver)) { deadRegionServers.add(regionserver.getServerName()); } for (String queueId : queueIds) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index c1166802b0ee..303a091ac98f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -283,8 +283,7 @@ private void adoptAbandonedQueues() { if (currentReplicators == null || currentReplicators.isEmpty()) { return; } - List otherRegionServers = replicationTracker.getListOfRegionServers().stream() - .map(ServerName::valueOf).collect(Collectors.toList()); + List otherRegionServers = replicationTracker.getListOfRegionServers(); LOG.info( "Current list of replicators: " + currentReplicators + " other RSs: " + otherRegionServers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java index 1500a717c401..da82e19f2ab3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java @@ -115,26 +115,26 @@ public void testGetListOfRegionServers() throws Exception { assertEquals(0, rt.getListOfRegionServers().size()); // 1 region server - ZKUtil.createWithParents(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, "hostname1.example.org:1234")); - List rss = rt.getListOfRegionServers(); + ZKUtil.createWithParents(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, + "hostname1.example.org,1234,1611218678009")); + List rss = rt.getListOfRegionServers(); assertEquals(rss.toString(), 1, rss.size()); // 2 region servers - ZKUtil.createWithParents(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, 
"hostname2.example.org:1234")); + ZKUtil.createWithParents(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, + "hostname2.example.org,1234,1611218678009")); rss = rt.getListOfRegionServers(); assertEquals(rss.toString(), 2, rss.size()); // 1 region server - ZKUtil.deleteNode(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, "hostname2.example.org:1234")); + ZKUtil.deleteNode(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, + "hostname2.example.org,1234,1611218678009")); rss = rt.getListOfRegionServers(); assertEquals(1, rss.size()); // 0 region server - ZKUtil.deleteNode(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, "hostname1.example.org:1234")); + ZKUtil.deleteNode(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, + "hostname1.example.org,1234,1611218678009")); rss = rt.getListOfRegionServers(); assertEquals(rss.toString(), 0, rss.size()); } From d21e4eb62ea2f7f7fdcba348829fe0d2f0fe2838 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:06:52 +0300 Subject: [PATCH 360/769] Added AdaptiveLruBlockCache --- .../hbase/io/hfile/AdaptiveLruBlockCache.java | 1433 +++++++++++++++++ .../hbase/io/hfile/BlockCacheFactory.java | 2 + .../hadoop/hbase/io/hfile/LruBlockCache.java | 297 +--- .../io/hfile/TestAdaptiveLruBlockCache.java | 1124 +++++++++++++ .../hbase/io/hfile/TestLruBlockCache.java | 299 ++-- 5 files changed, 2725 insertions(+), 430 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java new file mode 100644 index 000000000000..a57464bb1d6d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -0,0 +1,1433 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import static java.util.Objects.requireNonNull;
+
+import java.lang.ref.WeakReference;
+import java.util.EnumMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.LongAdder;
+import java.util.concurrent.locks.ReentrantLock;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
+import org.apache.hbase.thirdparty.com.google.common.base.Objects;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * This implementation improves the performance of the classical LRU cache by up to 3 times, by
+ * reducing garbage collection work.
+ * <p>

+ * The classical block cache implementation that is memory-aware using {@link HeapSize},
+ * memory-bound using an LRU eviction algorithm, and concurrent: backed by a
+ * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving constant-time
+ * {@link #cacheBlock} and {@link #getBlock} operations.
+ * <p>

+ * Contains three levels of block priority to allow for scan-resistance and in-memory families
+ * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An
+ * in-memory column family is a column family that should be served from memory if possible):
+ * single-access, multiple-accesses, and in-memory priority. A block is added with an in-memory
+ * priority flag if {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptor#isInMemory()},
+ * otherwise a block becomes a single access priority the first time it is read into this block
+ * cache. If a block is accessed again while in cache, it is marked as a multiple access priority
+ * block. This delineation of blocks is used to prevent scans from thrashing the cache, adding a
+ * least-frequently-used element to the eviction algorithm.
+ * <p>

+ * Each priority is given its own chunk of the total cache to ensure fairness during eviction.
+ * Each priority will retain close to its maximum size; however, if any priority is not using its
+ * entire chunk the others are able to grow beyond their chunk size.
+ * <p>

+ * Instantiated at a minimum with the total size and average block size. All sizes are in bytes.
+ * The block size is not especially important as this cache is fully dynamic in its sizing of
+ * blocks. It is only used for pre-allocating data structures and in initial heap estimation of
+ * the map.
+ * <p>

+ * The detailed constructor defines the sizes for the three priorities (they should total to the
+ * maximum size defined). It also sets the levels that trigger and control the eviction thread.
+ * <p>

+ * The acceptable size is the cache size level which triggers the eviction process to start. It
+ * evicts enough blocks to get the size below the minimum size specified.
+ * <p>

+ * Eviction happens in a separate thread and involves a single full-scan of the map. It
+ * determines how many bytes must be freed to reach the minimum size, and then while scanning
+ * determines the fewest least-recently-used blocks necessary from each of the three priorities
+ * (would be 3 times bytes to free). It then uses the priority chunk sizes to evict fairly
+ * according to the relative sizes and usage.
+ * <p>

+ * The adaptive LRU cache speeds things up when we are reading much more data than can fit into
+ * the BlockCache, which causes a high rate of evictions. This in turn leads to heavy garbage
+ * collection work: a lot of blocks are put into the BlockCache but never read, while a lot of
+ * CPU is spent cleaning them up. We can avoid this situation via the following parameters:
+ * <p>

+ * hbase.lru.cache.heavy.eviction.count.limit - sets how many times the eviction process has to
+ * run before we start skipping data blocks on put. By default it is Integer.MAX_VALUE, which
+ * effectively disables the feature (see the constant below); setting it to a small value such
+ * as 0-10 enables it right away. If we sometimes do short reads of the same data and sometimes
+ * long-term reads, we can separate the two cases with this parameter. For example, if we know
+ * that our short reads usually last about 1 minute, we can set the parameter to about 10 and
+ * the feature will kick in only for long massive reads (after ~100 seconds). So for short reads
+ * we keep everything in the cache (except for evictions, of course), while for long-term heavy
+ * reads the feature is enabled after some time and brings better performance.
+ * <p>

+ * hbase.lru.cache.heavy.eviction.mb.size.limit - sets how many bytes we would like to put into
+ * the BlockCache (and evict from it) per ~10 seconds. The feature will try to reach this value
+ * and maintain it. Don't set it too small, because that leads to a premature exit from this
+ * mode. For powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB, for an
+ * average system (~10 cores) 200-300 MB, and some weak systems (2-5 cores) may be fine with
+ * 50-100 MB.
+ * How it works: we set the limit and after each ~10 seconds calculate how many bytes were freed:
+ * Overhead = Freed Bytes Sum (MB) * 100 / Limit (MB) - 100;
+ * For example, if we set the limit to 500 and 2000 MB were evicted, the overhead is:
+ * 2000 * 100 / 500 - 100 = 300%
+ * The feature will then reduce the percent of cached data blocks to bring the evicted bytes
+ * closer to 100% of the limit (500 MB) - a kind of auto-scaling.
+ * If fewer bytes were freed than the limit, we get negative overhead. For example, if only
+ * 200 MB were freed:
+ * 200 * 100 / 500 - 100 = -60%
+ * and the feature will increase the percent of cached blocks, again bringing the evicted bytes
+ * closer to 100% of the limit (500 MB).
+ * The current situation can be found in the RegionServer log:
+ * BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current caching
+ * DataBlock (%): 100 < no eviction, 100% of blocks are cached
+ * BlockCache evicted (MB): 2000, overhead (%): 300, heavy eviction counter: 1, current caching
+ * DataBlock (%): 97 < eviction begins, caching reduced by 3%
+ * This helps to tune the system and find out which value is best to set. Don't try to reach 0%
+ * overhead - it is impossible. An overhead of 50-100% is quite good and prevents a premature
+ * exit from this mode.
+ * <p>

+ * hbase.lru.cache.heavy.eviction.overhead.coefficient - sets how fast we want to get the
+ * result. If we know that our reads are heavy for a long time, we don't want to wait and can
+ * increase the coefficient to get good performance sooner. But if we aren't sure, we can do it
+ * slowly, which helps prevent a premature exit from this mode. So a higher coefficient gives
+ * better performance when heavy reading is stable, while a lower coefficient lets us adjust
+ * when the read pattern is changing.
+ * For example, say we set the coefficient = 0.01. The overhead (see above) is multiplied by
+ * 0.01, and the result is the number of percentage points by which caching of data blocks is
+ * reduced. For example, if the overhead = 300% and the coefficient = 0.01, then the percent of
+ * cached blocks is reduced by 3%.
+ * Similar logic applies when the overhead is negative (overshooting). Maybe it is just a
+ * short-term fluctuation, so we try to stay in this mode; this helps avoid a premature exit
+ * during short-term fluctuations. The backpressure has simple logic: more overshooting - more
+ * cached blocks.
+ * <p>

    + * Find more information about improvement: https://issues.apache.org/jira/browse/HBASE-23887 + */ +@InterfaceAudience.Private +public class AdaptiveLruBlockCache implements FirstLevelBlockCache { + + private static final Logger LOG = LoggerFactory.getLogger(AdaptiveLruBlockCache.class); + + /** + * Percentage of total size that eviction will evict until; e.g. if set to .8, then we will keep + * evicting during an eviction run till the cache size is down to 80% of the total. + */ + private static final String LRU_MIN_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.min.factor"; + + /** + * Acceptable size of cache (no evictions if size < acceptable) + */ + private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = + "hbase.lru.blockcache.acceptable.factor"; + + /** + * Hard capacity limit of cache, will reject any put if size > this * acceptable + */ + static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = + "hbase.lru.blockcache.hard.capacity.limit.factor"; + private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.single.percentage"; + private static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.multi.percentage"; + private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.memory.percentage"; + + /** + * Configuration key to force data-block always (except in-memory are too much) + * cached in memory for in-memory hfile, unlike inMemory, which is a column-family + * configuration, inMemoryForceMode is a cluster-wide configuration + */ + private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = + "hbase.lru.rs.inmemoryforcemode"; + + /* Default Configuration Parameters*/ + + /* Backing Concurrent Map Configuration */ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /* Eviction thresholds */ + private static final float DEFAULT_MIN_FACTOR = 0.95f; + static final float DEFAULT_ACCEPTABLE_FACTOR = 0.99f; + + /* Priority buckets */ + private static final float DEFAULT_SINGLE_FACTOR = 0.25f; + private static final float DEFAULT_MULTI_FACTOR = 0.50f; + private static final float DEFAULT_MEMORY_FACTOR = 0.25f; + + private static final float DEFAULT_HARD_CAPACITY_LIMIT_FACTOR = 1.2f; + + private static final boolean DEFAULT_IN_MEMORY_FORCE_MODE = false; + + /* Statistics thread */ + private static final int STAT_THREAD_PERIOD = 60 * 5; + private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; + private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; + + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT + = "hbase.lru.cache.heavy.eviction.count.limit"; + // Default value actually equal to disable feature of increasing performance. + // Because 2147483647 is about ~680 years (after that it will start to work) + // We can set it to 0-10 and get the profit right now. + // (see details https://issues.apache.org/jira/browse/HBASE-23887). 
+ private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; + + private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT + = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; + + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT + = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; + + /** + * Defined the cache map as {@link ConcurrentHashMap} here, because in + * {@link AdaptiveLruBlockCache#getBlock}, we need to guarantee the atomicity of map#computeIfPresent + * (key, func). Besides, the func method must execute exactly once only when the key is present + * and under the lock context, otherwise the reference count will be messed up. Notice that the + * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + */ + private transient final ConcurrentHashMap map; + + /** Eviction lock (locked when eviction in process) */ + private transient final ReentrantLock evictionLock = new ReentrantLock(true); + + private final long maxBlockSize; + + /** Volatile boolean to track if we are in an eviction process or not */ + private volatile boolean evictionInProgress = false; + + /** Eviction thread */ + private transient final EvictionThread evictionThread; + + /** Statistics thread schedule pool (for heavy debugging, could remove) */ + private transient final ScheduledExecutorService scheduleThreadPool = + Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setNameFormat("AdaptiveLruBlockCacheStatsExecutor").setDaemon(true).build()); + + /** Current size of cache */ + private final AtomicLong size; + + /** Current size of data blocks */ + private final LongAdder dataBlockSize; + + /** Current number of cached elements */ + private final AtomicLong elements; + + /** Current number of cached data block elements */ + private final LongAdder dataBlockElements; + + /** Cache access count (sequential ID) */ + private final AtomicLong count; + + /** hard capacity limit */ + private float hardCapacityLimitFactor; + + /** Cache statistics */ + private final CacheStats stats; + + /** Maximum allowable size of cache (block put if size > max, evict) */ + private long maxSize; + + /** Approximate block size */ + private long blockSize; + + /** Acceptable size of cache (no evictions if size < acceptable) */ + private float acceptableFactor; + + /** Minimum threshold of cache (when evicting, evict until size < min) */ + private float minFactor; + + /** Single access bucket size */ + private float singleFactor; + + /** Multiple access bucket size */ + private float multiFactor; + + /** In-memory bucket size */ + private float memoryFactor; + + /** Overhead of the structure itself */ + private long overhead; + + /** Whether in-memory hfile's data block has higher priority when evicting */ + private boolean forceInMemory; + + /** + * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an + * external cache as L2. 
+ * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + */ + private transient BlockCache victimHandler = null; + + /** Percent of cached data blocks */ + private volatile int cacheDataBlockPercent; + + /** Limit of count eviction process when start to avoid to cache blocks */ + private final int heavyEvictionCountLimit; + + /** Limit of volume eviction process when start to avoid to cache blocks */ + private final long heavyEvictionMbSizeLimit; + + /** Adjust auto-scaling via overhead of evition rate */ + private final float heavyEvictionOverheadCoefficient; + + /** + * Default constructor. Specify maximum size and expected average block + * size (approximation is fine). + * + *

    All other factors will be calculated based on defaults specified in + * this class. + * + * @param maxSize maximum size of cache, in bytes + * @param blockSize approximate size of each block, in bytes + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize) { + this(maxSize, blockSize, true); + } + + /** + * Constructor used for testing. Allows disabling of the eviction thread. + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread) { + this(maxSize, blockSize, evictionThread, + (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, + DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, + DEFAULT_SINGLE_FACTOR, + DEFAULT_MULTI_FACTOR, + DEFAULT_MEMORY_FACTOR, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, + false, + DEFAULT_MAX_BLOCK_SIZE, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); + } + + public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { + this(maxSize, blockSize, evictionThread, + (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, + DEFAULT_CONCURRENCY_LEVEL, + conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), + conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), + conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), + conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), + conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), + conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), + conf.getFloat(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); + } + + public AdaptiveLruBlockCache(long maxSize, long blockSize, Configuration conf) { + this(maxSize, blockSize, true, conf); + } + + /** + * Configurable constructor. Use this constructor if not using defaults. 
+ * + * @param maxSize maximum size of this cache, in bytes + * @param blockSize expected average size of blocks, in bytes + * @param evictionThread whether to run evictions in a bg thread or not + * @param mapInitialSize initial size of backing ConcurrentHashMap + * @param mapLoadFactor initial load factor of backing ConcurrentHashMap + * @param mapConcurrencyLevel initial concurrency factor for backing CHM + * @param minFactor percentage of total size that eviction will evict until + * @param acceptableFactor percentage of total size that triggers eviction + * @param singleFactor percentage of total size for single-access blocks + * @param multiFactor percentage of total size for multiple-access blocks + * @param memoryFactor percentage of total size for in-memory blocks + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread, + int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, + float minFactor, float acceptableFactor, float singleFactor, + float multiFactor, float memoryFactor, float hardLimitFactor, + boolean forceInMemory, long maxBlockSize, + int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, + float heavyEvictionOverheadCoefficient) { + this.maxBlockSize = maxBlockSize; + if(singleFactor + multiFactor + memoryFactor != 1 || + singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { + throw new IllegalArgumentException("Single, multi, and memory factors " + + " should be non-negative and total 1.0"); + } + if (minFactor >= acceptableFactor) { + throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); + } + if (minFactor >= 1.0f || acceptableFactor >= 1.0f) { + throw new IllegalArgumentException("all factors must be < 1"); + } + this.maxSize = maxSize; + this.blockSize = blockSize; + this.forceInMemory = forceInMemory; + map = new ConcurrentHashMap<>(mapInitialSize, mapLoadFactor, mapConcurrencyLevel); + this.minFactor = minFactor; + this.acceptableFactor = acceptableFactor; + this.singleFactor = singleFactor; + this.multiFactor = multiFactor; + this.memoryFactor = memoryFactor; + this.stats = new CacheStats(this.getClass().getSimpleName()); + this.count = new AtomicLong(0); + this.elements = new AtomicLong(0); + this.dataBlockElements = new LongAdder(); + this.dataBlockSize = new LongAdder(); + this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel); + this.size = new AtomicLong(this.overhead); + this.hardCapacityLimitFactor = hardLimitFactor; + if (evictionThread) { + this.evictionThread = new EvictionThread(this); + this.evictionThread.start(); // FindBugs SC_START_IN_CTOR + } else { + this.evictionThread = null; + } + + // check the bounds + this.heavyEvictionCountLimit = heavyEvictionCountLimit < 0 ? 0 : heavyEvictionCountLimit; + this.heavyEvictionMbSizeLimit = heavyEvictionMbSizeLimit < 1 ? 1 : heavyEvictionMbSizeLimit; + this.cacheDataBlockPercent = 100; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 0.1f + ? 1f : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001f + ? 0.001f : heavyEvictionOverheadCoefficient; + this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; + + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // every five minutes. 
+ this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, + STAT_THREAD_PERIOD, TimeUnit.SECONDS); + } + + @Override + public void setVictimCache(BlockCache victimCache) { + if (victimHandler != null) { + throw new IllegalArgumentException("The victim cache has already been set"); + } + victimHandler = requireNonNull(victimCache); + } + + @Override + public void setMaxSize(long maxSize) { + this.maxSize = maxSize; + if (this.size.get() > acceptableSize() && !evictionInProgress) { + runEviction(); + } + } + + public int getCacheDataBlockPercent() { + return cacheDataBlockPercent; + } + + /** + * The block cached in AdaptiveLruBlockCache will always be an heap block: on the one side, the heap + * access will be more faster then off-heap, the small index block or meta block cached in + * CombinedBlockCache will benefit a lot. on other side, the AdaptiveLruBlockCache size is always + * calculated based on the total heap size, if caching an off-heap block in AdaptiveLruBlockCache, the + * heap size will be messed up. Here we will clone the block into an heap block if it's an + * off-heap block, otherwise just use the original block. The key point is maintain the refCnt of + * the block (HBASE-22127):
+ * 1. if we cache the cloned heap block, its refCnt is a totally new one, so it's easy to handle;
    + * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's + * reservoir, if both RPC and AdaptiveLruBlockCache release the block, then it can be garbage collected by + * JVM, so need a retain here. + * @param buf the original block + * @return an block with an heap memory backend. + */ + private Cacheable asReferencedHeapBlock(Cacheable buf) { + if (buf instanceof HFileBlock) { + HFileBlock blk = ((HFileBlock) buf); + if (blk.isSharedMem()) { + return HFileBlock.deepCloneOnHeap(blk); + } + } + // The block will be referenced by this AdaptiveLruBlockCache, so should increase its refCnt here. + return buf.retain(); + } + + // BlockCache implementation + + /** + * Cache the block with the specified name and buffer. + *

    + * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) + * this can happen, for which we compare the buffer contents. + * + * @param cacheKey block's cache key + * @param buf block buffer + * @param inMemory if block is in-memory + */ + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { + + // Some data blocks will not put into BlockCache when eviction rate too much. + // It is good for performance + // (see details: https://issues.apache.org/jira/browse/HBASE-23887) + // How to calculate it can find inside EvictionThread class. + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + // It works like filter - blocks which two last digits of offset + // more than we calculate in Eviction Thread will not put into BlockCache + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; + } + } + + if (buf.heapSize() > maxBlockSize) { + // If there are a lot of blocks that are too + // big this can make the logs way too noisy. + // So we log 2% + if (stats.failInsert() % 50 == 0) { + LOG.warn("Trying to cache too large a block " + + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + + " is " + buf.heapSize() + + " which is larger than " + maxBlockSize); + } + return; + } + + LruCachedBlock cb = map.get(cacheKey); + if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, buf)) { + return; + } + long currentSize = size.get(); + long currentAcceptableSize = acceptableSize(); + long hardLimitSize = (long) (hardCapacityLimitFactor * currentAcceptableSize); + if (currentSize >= hardLimitSize) { + stats.failInsert(); + if (LOG.isTraceEnabled()) { + LOG.trace("AdaptiveLruBlockCache current size " + StringUtils.byteDesc(currentSize) + + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "." + + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize) + + ", failed to put cacheKey:" + cacheKey + " into AdaptiveLruBlockCache."); + } + if (!evictionInProgress) { + runEviction(); + } + return; + } + // Ensure that the block is an heap one. + buf = asReferencedHeapBlock(buf); + cb = new LruCachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory); + long newSize = updateSizeMetrics(cb, false); + map.put(cacheKey, cb); + long val = elements.incrementAndGet(); + if (buf.getBlockType().isData()) { + dataBlockElements.increment(); + } + if (LOG.isTraceEnabled()) { + long size = map.size(); + assertCounterSanity(size, val); + } + if (newSize > currentAcceptableSize && !evictionInProgress) { + runEviction(); + } + } + + /** + * Sanity-checking for parity between actual block cache content and metrics. + * Intended only for use with TRACE level logging and -ea JVM. + */ + private static void assertCounterSanity(long mapSize, long counterVal) { + if (counterVal < 0) { + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); + return; + } + if (mapSize < Integer.MAX_VALUE) { + double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); + if (pct_diff > 0.05) { + LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + + ", mapSize=" + mapSize); + } + } + } + + /** + * Cache the block with the specified name and buffer. + *

    + * TODO after HBASE-22005, we may cache an block which allocated from off-heap, but our LRU cache + * sizing is based on heap size, so we should handle this in HBASE-22127. It will introduce an + * switch whether make the LRU on-heap or not, if so we may need copy the memory to on-heap, + * otherwise the caching size is based on off-heap. + * @param cacheKey block's cache key + * @param buf block buffer + */ + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { + cacheBlock(cacheKey, buf, false); + } + + /** + * Helper function that updates the local size counter and also updates any + * per-cf or per-blocktype metrics it can discern from given + * {@link LruCachedBlock} + */ + private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { + long heapsize = cb.heapSize(); + BlockType bt = cb.getBuffer().getBlockType(); + if (evict) { + heapsize *= -1; + } + if (bt != null && bt.isData()) { + dataBlockSize.add(heapsize); + } + return size.addAndGet(heapsize); + } + + /** + * Get the buffer of the block with the specified name. + * + * @param cacheKey block's cache key + * @param caching true if the caller caches blocks on cache misses + * @param repeat Whether this is a repeat lookup for the same block + * (used to avoid double counting cache misses when doing double-check + * locking) + * @param updateCacheMetrics Whether to update cache metrics or not + * + * @return buffer of specified cache key, or null if not in cache + */ + @Override + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { + LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> { + // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside + // this block. because if retain outside the map#computeIfPresent, the evictBlock may remove + // the block and release, then we're retaining a block with refCnt=0 which is disallowed. + // see HBASE-22422. + val.getBuffer().retain(); + return val; + }); + if (cb == null) { + if (!repeat && updateCacheMetrics) { + stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + } + // If there is another block cache then try and read there. + // However if this is a retry ( second time in double checked locking ) + // And it's already a miss then the l2 will also be a miss. + if (victimHandler != null && !repeat) { + // The handler will increase result's refCnt for RPC, so need no extra retain. + Cacheable result = victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + // Promote this to L1. + if (result != null) { + if (caching) { + cacheBlock(cacheKey, result, /* inMemory = */ false); + } + } + return result; + } + return null; + } + if (updateCacheMetrics) { + stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + } + cb.access(count.incrementAndGet()); + return cb.getBuffer(); + } + + /** + * Whether the cache contains block with specified cacheKey + * + * @return true if contains the block + */ + @Override + public boolean containsBlock(BlockCacheKey cacheKey) { + return map.containsKey(cacheKey); + } + + @Override + public boolean evictBlock(BlockCacheKey cacheKey) { + LruCachedBlock cb = map.get(cacheKey); + return cb != null && evictBlock(cb, false) > 0; + } + + /** + * Evicts all blocks for a specific HFile. This is an + * expensive operation implemented as a linear-time search through all blocks + * in the cache. Ideally this should be a search in a log-access-time map. + * + *

    + * This is used for evict-on-close to remove all blocks of a specific HFile. + * + * @return the number of blocks evicted + */ + @Override + public int evictBlocksByHfileName(String hfileName) { + int numEvicted = 0; + for (BlockCacheKey key : map.keySet()) { + if (key.getHfileName().equals(hfileName)) { + if (evictBlock(key)) { + ++numEvicted; + } + } + } + if (victimHandler != null) { + numEvicted += victimHandler.evictBlocksByHfileName(hfileName); + } + return numEvicted; + } + + /** + * Evict the block, and it will be cached by the victim handler if exists && + * block may be read again later + * + * @param evictedByEvictionProcess true if the given block is evicted by + * EvictionThread + * @return the heap size of evicted block + */ + protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { + LruCachedBlock previous = map.remove(block.getCacheKey()); + if (previous == null) { + return 0; + } + updateSizeMetrics(block, true); + long val = elements.decrementAndGet(); + if (LOG.isTraceEnabled()) { + long size = map.size(); + assertCounterSanity(size, val); + } + if (block.getBuffer().getBlockType().isData()) { + dataBlockElements.decrement(); + } + if (evictedByEvictionProcess) { + // When the eviction of the block happened because of invalidation of HFiles, no need to + // update the stats counter. + stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary()); + if (victimHandler != null) { + victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer()); + } + } + // Decrease the block's reference count, and if refCount is 0, then it'll auto-deallocate. DO + // NOT move this up because if do that then the victimHandler may access the buffer with + // refCnt = 0 which is disallowed. + previous.getBuffer().release(); + return block.heapSize(); + } + + /** + * Multi-threaded call to run the eviction process. + */ + private void runEviction() { + if (evictionThread == null) { + evict(); + } else { + evictionThread.evict(); + } + } + + boolean isEvictionInProgress() { + return evictionInProgress; + } + + long getOverhead() { + return overhead; + } + + /** + * Eviction method. + * + * Evict items in order of use, allowing delete items + * which haven't been used for the longest amount of time. 
+ * + * @return how many bytes were freed + */ + long evict() { + + // Ensure only one eviction at a time + if (!evictionLock.tryLock()) { + return 0; + } + + long bytesToFree = 0L; + + try { + evictionInProgress = true; + long currentSize = this.size.get(); + bytesToFree = currentSize - minSize(); + + if (LOG.isTraceEnabled()) { + LOG.trace("Block cache LRU eviction started; Attempting to free " + + StringUtils.byteDesc(bytesToFree) + " of total=" + + StringUtils.byteDesc(currentSize)); + } + + if (bytesToFree <= 0) { + return 0; + } + + // Instantiate priority buckets + BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); + BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); + BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); + + // Scan entire map putting into appropriate buckets + for (LruCachedBlock cachedBlock : map.values()) { + switch (cachedBlock.getPriority()) { + case SINGLE: { + bucketSingle.add(cachedBlock); + break; + } + case MULTI: { + bucketMulti.add(cachedBlock); + break; + } + case MEMORY: { + bucketMemory.add(cachedBlock); + break; + } + } + } + + long bytesFreed = 0; + if (forceInMemory || memoryFactor > 0.999f) { + long s = bucketSingle.totalSize(); + long m = bucketMulti.totalSize(); + if (bytesToFree > (s + m)) { + // this means we need to evict blocks in memory bucket to make room, + // so the single and multi buckets will be emptied + bytesFreed = bucketSingle.free(s); + bytesFreed += bucketMulti.free(m); + if (LOG.isTraceEnabled()) { + LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + + " from single and multi buckets"); + } + bytesFreed += bucketMemory.free(bytesToFree - bytesFreed); + if (LOG.isTraceEnabled()) { + LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + + " total from all three buckets "); + } + } else { + // this means no need to evict block in memory bucket, + // and we try best to make the ratio between single-bucket and + // multi-bucket is 1:2 + long bytesRemain = s + m - bytesToFree; + if (3 * s <= bytesRemain) { + // single-bucket is small enough that no eviction happens for it + // hence all eviction goes from multi-bucket + bytesFreed = bucketMulti.free(bytesToFree); + } else if (3 * m <= 2 * bytesRemain) { + // multi-bucket is small enough that no eviction happens for it + // hence all eviction goes from single-bucket + bytesFreed = bucketSingle.free(bytesToFree); + } else { + // both buckets need to evict some blocks + bytesFreed = bucketSingle.free(s - bytesRemain / 3); + if (bytesFreed < bytesToFree) { + bytesFreed += bucketMulti.free(bytesToFree - bytesFreed); + } + } + } + } else { + PriorityQueue bucketQueue = new PriorityQueue<>(3); + + bucketQueue.add(bucketSingle); + bucketQueue.add(bucketMulti); + bucketQueue.add(bucketMemory); + + int remainingBuckets = bucketQueue.size(); + + BlockBucket bucket; + while ((bucket = bucketQueue.poll()) != null) { + long overflow = bucket.overflow(); + if (overflow > 0) { + long bucketBytesToFree = + Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); + bytesFreed += bucket.free(bucketBytesToFree); + } + remainingBuckets--; + } + } + if (LOG.isTraceEnabled()) { + long single = bucketSingle.totalSize(); + long multi = bucketMulti.totalSize(); + long memory = bucketMemory.totalSize(); + LOG.trace("Block cache LRU eviction completed; " + + "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + + 
"single=" + StringUtils.byteDesc(single) + ", " + + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); + } + } finally { + stats.evict(); + evictionInProgress = false; + evictionLock.unlock(); + return bytesToFree; + } + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("blockCount", getBlockCount()) + .add("currentSize", StringUtils.byteDesc(getCurrentSize())) + .add("freeSize", StringUtils.byteDesc(getFreeSize())) + .add("maxSize", StringUtils.byteDesc(getMaxSize())) + .add("heapSize", StringUtils.byteDesc(heapSize())) + .add("minSize", StringUtils.byteDesc(minSize())) + .add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())) + .add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())) + .add("singleFactor", singleFactor) + .toString(); + } + + /** + * Used to group blocks into priority buckets. There will be a BlockBucket + * for each priority (single, multi, memory). Once bucketed, the eviction + * algorithm takes the appropriate number of elements out of each according + * to configuration parameters and their relatives sizes. + */ + private class BlockBucket implements Comparable { + + private final String name; + private LruCachedBlockQueue queue; + private long totalSize = 0; + private long bucketSize; + + public BlockBucket(String name, long bytesToFree, long blockSize, long bucketSize) { + this.name = name; + this.bucketSize = bucketSize; + queue = new LruCachedBlockQueue(bytesToFree, blockSize); + totalSize = 0; + } + + public void add(LruCachedBlock block) { + totalSize += block.heapSize(); + queue.add(block); + } + + public long free(long toFree) { + if (LOG.isTraceEnabled()) { + LOG.trace("freeing " + StringUtils.byteDesc(toFree) + " from " + this); + } + LruCachedBlock cb; + long freedBytes = 0; + while ((cb = queue.pollLast()) != null) { + freedBytes += evictBlock(cb, true); + if (freedBytes >= toFree) { + return freedBytes; + } + } + if (LOG.isTraceEnabled()) { + LOG.trace("freed " + StringUtils.byteDesc(freedBytes) + " from " + this); + } + return freedBytes; + } + + public long overflow() { + return totalSize - bucketSize; + } + + public long totalSize() { + return totalSize; + } + + @Override + public int compareTo(BlockBucket that) { + return Long.compare(this.overflow(), that.overflow()); + } + + @Override + public boolean equals(Object that) { + if (that == null || !(that instanceof BlockBucket)) { + return false; + } + return compareTo((BlockBucket)that) == 0; + } + + @Override + public int hashCode() { + return Objects.hashCode(name, bucketSize, queue, totalSize); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", name) + .add("totalSize", StringUtils.byteDesc(totalSize)) + .add("bucketSize", StringUtils.byteDesc(bucketSize)) + .toString(); + } + } + + /** + * Get the maximum size of this cache. 
+ * + * @return max size in bytes + */ + + @Override + public long getMaxSize() { + return this.maxSize; + } + + @Override + public long getCurrentSize() { + return this.size.get(); + } + + @Override + public long getCurrentDataSize() { + return this.dataBlockSize.sum(); + } + + @Override + public long getFreeSize() { + return getMaxSize() - getCurrentSize(); + } + + @Override + public long size() { + return getMaxSize(); + } + + @Override + public long getBlockCount() { + return this.elements.get(); + } + + @Override + public long getDataBlockCount() { + return this.dataBlockElements.sum(); + } + + EvictionThread getEvictionThread() { + return this.evictionThread; + } + + /* + * Eviction thread. Sits in waiting state until an eviction is triggered + * when the cache size grows above the acceptable level.

    + * + * Thread is triggered into action by {@link AdaptiveLruBlockCache#runEviction()} + */ + static class EvictionThread extends Thread { + + private WeakReference cache; + private volatile boolean go = true; + // flag set after enter the run method, used for test + private boolean enteringRun = false; + + public EvictionThread(AdaptiveLruBlockCache cache) { + super(Thread.currentThread().getName() + ".AdaptiveLruBlockCache.EvictionThread"); + setDaemon(true); + this.cache = new WeakReference<>(cache); + } + + @Override + public void run() { + enteringRun = true; + long freedSumMb = 0; + int heavyEvictionCount = 0; + int freedDataOverheadPercent = 0; + long startTime = System.currentTimeMillis(); + while (this.go) { + synchronized (this) { + try { + this.wait(1000 * 10/*Don't wait for ever*/); + } catch (InterruptedException e) { + LOG.warn("Interrupted eviction thread ", e); + Thread.currentThread().interrupt(); + } + } + AdaptiveLruBlockCache cache = this.cache.get(); + if (cache == null) { + break; + } + freedSumMb += cache.evict()/1024/1024; + /* + * Sometimes we are reading more data than can fit into BlockCache + * and it is the cause a high rate of evictions. + * This in turn leads to heavy Garbage Collector works. + * So a lot of blocks put into BlockCache but never read, + * but spending a lot of CPU resources. + * Here we will analyze how many bytes were freed and decide + * decide whether the time has come to reduce amount of caching blocks. + * It help avoid put too many blocks into BlockCache + * when evict() works very active and save CPU for other jobs. + * More delails: https://issues.apache.org/jira/browse/HBASE-23887 + */ + + // First of all we have to control how much time + // has passed since previuos evict() was launched + // This is should be almost the same time (+/- 10s) + // because we get comparable volumes of freed bytes each time. + // 10s because this is default period to run evict() (see above this.wait) + long stopTime = System.currentTimeMillis(); + if ((stopTime - startTime) > 1000 * 10 - 1) { + // Here we have to calc what situation we have got. + // We have the limit "hbase.lru.cache.heavy.eviction.bytes.size.limit" + // and can calculte overhead on it. + // We will use this information to decide, + // how to change percent of caching blocks. + freedDataOverheadPercent = + (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; + if (freedSumMb > cache.heavyEvictionMbSizeLimit) { + // Now we are in the situation when we are above the limit + // But maybe we are going to ignore it because it will end quite soon + heavyEvictionCount++; + if (heavyEvictionCount > cache.heavyEvictionCountLimit) { + // It is going for a long time and we have to reduce of caching + // blocks now. So we calculate here how many blocks we want to skip. + // It depends on: + // 1. Overhead - if overhead is big we could more aggressive + // reducing amount of caching blocks. + // 2. How fast we want to get the result. If we know that our + // heavy reading for a long time, we don't want to wait and can + // increase the coefficient and get good performance quite soon. + // But if we don't sure we can do it slowly and it could prevent + // premature exit from this mode. So, when the coefficient is + // higher we can get better performance when heavy reading is stable. + // But when reading is changing we can adjust to it and set + // the coefficient to lower value. 
+ int change = + (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + // But practice shows that 15% of reducing is quite enough. + // We are not greedy (it could lead to premature exit). + change = Math.min(15, change); + change = Math.max(0, change); // I think it will never happen but check for sure + // So this is the key point, here we are reducing % of caching blocks + cache.cacheDataBlockPercent -= change; + // If we go down too deep we have to stop here, 1% any way should be. + cache.cacheDataBlockPercent = Math.max(1, cache.cacheDataBlockPercent); + } + } else { + // Well, we have got overshooting. + // Mayby it is just short-term fluctuation and we can stay in this mode. + // It help avoid permature exit during short-term fluctuation. + // If overshooting less than 90%, we will try to increase the percent of + // caching blocks and hope it is enough. + if (freedSumMb >= cache.heavyEvictionMbSizeLimit * 0.1) { + // Simple logic: more overshooting - more caching blocks (backpressure) + int change = (int) (-freedDataOverheadPercent * 0.1 + 1); + cache.cacheDataBlockPercent += change; + // But it can't be more then 100%, so check it. + cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); + } else { + // Looks like heavy reading is over. + // Just exit form this mode. + heavyEvictionCount = 0; + cache.cacheDataBlockPercent = 100; + } + } + LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + + "heavy eviction counter: {}, " + + "current caching DataBlock (%): {}", + freedSumMb, freedDataOverheadPercent, + heavyEvictionCount, cache.cacheDataBlockPercent); + + freedSumMb = 0; + startTime = stopTime; + } + } + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", + justification="This is what we want") + public void evict() { + synchronized (this) { + this.notifyAll(); + } + } + + synchronized void shutdown() { + this.go = false; + this.notifyAll(); + } + + /** + * Used for the test. + */ + boolean isEnteringRun() { + return this.enteringRun; + } + } + + /* + * Statistics thread. Periodically prints the cache statistics to the log. + */ + static class StatisticsThread extends Thread { + + private final AdaptiveLruBlockCache lru; + + public StatisticsThread(AdaptiveLruBlockCache lru) { + super("AdaptiveLruBlockCacheStats"); + setDaemon(true); + this.lru = lru; + } + + @Override + public void run() { + lru.logStats(); + } + } + + public void logStats() { + // Log size + long totalSize = heapSize(); + long freeSize = maxSize - totalSize; + AdaptiveLruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + + "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + + "max=" + StringUtils.byteDesc(this.maxSize) + ", " + + "blockCount=" + getBlockCount() + ", " + + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + + "hitRatio=" + (stats.getHitCount() == 0 ? + "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + + "cachingHits=" + stats.getHitCachingCount() + ", " + + "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? + "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + + "evicted=" + stats.getEvictedCount() + ", " + + "evictedPerRun=" + stats.evictedPerEviction()); + } + + /** + * Get counter statistics for this cache. + * + *

+   * <p>Includes: total accesses, hits, misses, evicted blocks, and runs
+   * of the eviction processes.
+   */
+  @Override
+  public CacheStats getStats() {
+    return this.stats;
+  }
+
+  public final static long CACHE_FIXED_OVERHEAD =
+    ClassSize.estimateBase(AdaptiveLruBlockCache.class, false);
+
+  @Override
+  public long heapSize() {
+    return getCurrentSize();
+  }
+
+  private static long calculateOverhead(long maxSize, long blockSize, int concurrency) {
+    // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG
+    return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP
+      + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY)
+      + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
+  }
+
+  @Override
+  public Iterator<CachedBlock> iterator() {
+    final Iterator<LruCachedBlock> iterator = map.values().iterator();
+
+    return new Iterator<CachedBlock>() {
+      private final long now = System.nanoTime();
+
+      @Override
+      public boolean hasNext() {
+        return iterator.hasNext();
+      }
+
+      @Override
+      public CachedBlock next() {
+        final LruCachedBlock b = iterator.next();
+        return new CachedBlock() {
+          @Override
+          public String toString() {
+            return BlockCacheUtil.toString(this, now);
+          }
+
+          @Override
+          public BlockPriority getBlockPriority() {
+            return b.getPriority();
+          }
+
+          @Override
+          public BlockType getBlockType() {
+            return b.getBuffer().getBlockType();
+          }
+
+          @Override
+          public long getOffset() {
+            return b.getCacheKey().getOffset();
+          }
+
+          @Override
+          public long getSize() {
+            return b.getBuffer().heapSize();
+          }
+
+          @Override
+          public long getCachedTime() {
+            return b.getCachedTime();
+          }
+
+          @Override
+          public String getFilename() {
+            return b.getCacheKey().getHfileName();
+          }
+
+          @Override
+          public int compareTo(CachedBlock other) {
+            int diff = this.getFilename().compareTo(other.getFilename());
+            if (diff != 0) {
+              return diff;
+            }
+            diff = Long.compare(this.getOffset(), other.getOffset());
+            if (diff != 0) {
+              return diff;
+            }
+            if (other.getCachedTime() < 0 || this.getCachedTime() < 0) {
+              throw new IllegalStateException(this.getCachedTime() + ", " + other.getCachedTime());
+            }
+            return Long.compare(other.getCachedTime(), this.getCachedTime());
+          }
+
+          @Override
+          public int hashCode() {
+            return b.hashCode();
+          }
+
+          @Override
+          public boolean equals(Object obj) {
+            if (obj instanceof CachedBlock) {
+              CachedBlock cb = (CachedBlock) obj;
+              return compareTo(cb) == 0;
+            } else {
+              return false;
+            }
+          }
+        };
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+    };
+  }
+
+  // Simple calculators of sizes given factors and maxSize
+
+  long acceptableSize() {
+    return (long) Math.floor(this.maxSize * this.acceptableFactor);
+  }
+  private long minSize() {
+    return (long) Math.floor(this.maxSize * this.minFactor);
+  }
+  private long singleSize() {
+    return (long) Math.floor(this.maxSize * this.singleFactor * this.minFactor);
+  }
+  private long multiSize() {
+    return (long) Math.floor(this.maxSize * this.multiFactor * this.minFactor);
+  }
+  private long memorySize() {
+    return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor);
+  }
+
+  @Override
+  public void shutdown() {
+    if (victimHandler != null) {
+      victimHandler.shutdown();
+    }
+    this.scheduleThreadPool.shutdown();
+    for (int i = 0; i < 10; i++) {
+      if (!this.scheduleThreadPool.isShutdown()) {
+        try {
+          Thread.sleep(10);
+        } catch (InterruptedException e) {
+          LOG.warn("Interrupted while sleeping");
+          Thread.currentThread().interrupt();
+          break;
+        }
+      }
+    }
+
+    if (!this.scheduleThreadPool.isShutdown()) {
+      List<Runnable> runnables = this.scheduleThreadPool.shutdownNow();
+      LOG.debug("Still running " + runnables);
+    }
+    this.evictionThread.shutdown();
+  }
+
+  /** Clears the cache. Used in tests. */
+  public void clearCache() {
+    this.map.clear();
+    this.elements.set(0);
+  }
+
+  /**
+   * Used in testing. May be very inefficient.
+   *
+   * @return the set of cached file names
+   */
+  SortedSet<String> getCachedFileNamesForTest() {
+    SortedSet<String> fileNames = new TreeSet<>();
+    for (BlockCacheKey cacheKey : map.keySet()) {
+      fileNames.add(cacheKey.getHfileName());
+    }
+    return fileNames;
+  }
+
+  public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
+    Map<DataBlockEncoding, Integer> counts = new EnumMap<>(DataBlockEncoding.class);
+    for (LruCachedBlock block : map.values()) {
+      DataBlockEncoding encoding = ((HFileBlock) block.getBuffer()).getDataBlockEncoding();
+      Integer count = counts.get(encoding);
+      counts.put(encoding, (count == null ? 0 : count) + 1);
+    }
+    return counts;
+  }
+
+  Map<BlockCacheKey, LruCachedBlock> getMapForTests() {
+    return map;
+  }
+
+  @Override
+  public BlockCache[] getBlockCaches() {
+    if (victimHandler != null) {
+      return new BlockCache[] { this, this.victimHandler };
+    }
+    return null;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java
index 2b9732092ce9..19725489a975 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java
@@ -145,6 +145,8 @@ private static FirstLevelBlockCache createFirstLevelCache(final Configuration c)
       return new LruBlockCache(cacheSize, blockSize, true, c);
     } else if (policy.equalsIgnoreCase("TinyLFU")) {
       return new TinyLfuBlockCache(cacheSize, blockSize, ForkJoinPool.commonPool(), c);
+    } else if (policy.equalsIgnoreCase("adaptiveLRU")) {
+      return new AdaptiveLruBlockCache(cacheSize, blockSize, true, c);
     } else {
       throw new IllegalArgumentException("Unknown policy: " + policy);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 9d77e46a8dfe..77243e6df889 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -43,7 +43,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
 import org.apache.hbase.thirdparty.com.google.common.base.Objects;
 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -100,19 +99,19 @@ public class LruBlockCache implements FirstLevelBlockCache {
    * Acceptable size of cache (no evictions if size < acceptable)
    */
   private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME =
-      "hbase.lru.blockcache.acceptable.factor";
+    "hbase.lru.blockcache.acceptable.factor";
 
   /**
    * Hard capacity limit of cache, will reject any put if size > this * acceptable
    */
   static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME =
-      "hbase.lru.blockcache.hard.capacity.limit.factor";
+    "hbase.lru.blockcache.hard.capacity.limit.factor";
 
   private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME =
-      "hbase.lru.blockcache.single.percentage";
+    "hbase.lru.blockcache.single.percentage";
 
   private static final String
LRU_MULTI_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.multi.percentage"; + "hbase.lru.blockcache.multi.percentage"; private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.memory.percentage"; + "hbase.lru.blockcache.memory.percentage"; /** * Configuration key to force data-block always (except in-memory are too much) @@ -120,7 +119,7 @@ public class LruBlockCache implements FirstLevelBlockCache { * configuration, inMemoryForceMode is a cluster-wide configuration */ private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = - "hbase.lru.rs.inmemoryforcemode"; + "hbase.lru.rs.inmemoryforcemode"; /* Default Configuration Parameters*/ @@ -146,22 +145,6 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; - private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT - = "hbase.lru.cache.heavy.eviction.count.limit"; - // Default value actually equal to disable feature of increasing performance. - // Because 2147483647 is about ~680 years (after that it will start to work) - // We can set it to 0-10 and get the profit right now. - // (see details https://issues.apache.org/jira/browse/HBASE-23887). - private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; - - private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT - = "hbase.lru.cache.heavy.eviction.mb.size.limit"; - private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; - - private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT - = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; - private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; - /** * Defined the cache map as {@link ConcurrentHashMap} here, because in * {@link LruBlockCache#getBlock}, we need to guarantee the atomicity of map#computeIfPresent @@ -242,18 +225,6 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached data blocks */ - private volatile int cacheDataBlockPercent; - - /** Limit of count eviction process when start to avoid to cache blocks */ - private final int heavyEvictionCountLimit; - - /** Limit of volume eviction process when start to avoid to cache blocks */ - private final long heavyEvictionMbSizeLimit; - - /** Adjust auto-scaling via overhead of evition rate */ - private final float heavyEvictionOverheadCoefficient; - /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). 
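Taken together, the constants above make up the whole tuning surface of the feature. A minimal sketch of how a deployment might enable it once this series lands (assuming the `hfile.block.cache.policy` key that BlockCacheFactory reads for the "adaptiveLRU" branch added above; the numeric values here are illustrative, not recommendations from this patch):

    Configuration conf = HBaseConfiguration.create();
    // Select the new first-level cache implementation in BlockCacheFactory.
    conf.set("hfile.block.cache.policy", "adaptiveLRU");
    // Start skipping DATA blocks after 3 consecutive heavy-eviction periods
    // (the default of Integer.MAX_VALUE effectively disables auto-scaling).
    conf.setInt("hbase.lru.cache.heavy.eviction.count.limit", 3);
    // A ~10s eviction period counts as "heavy" when it frees more than 200 MB.
    conf.setLong("hbase.lru.cache.heavy.eviction.mb.size.limit", 200);
    // How aggressively cacheDataBlockPercent reacts to eviction overhead.
    conf.setFloat("hbase.lru.cache.heavy.eviction.overhead.coefficient", 0.01f);

As the removed comment block above notes, setting the count limit to roughly 0-10 lets the auto-scaling engage almost immediately instead of being effectively disabled.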
@@ -273,40 +244,31 @@ public LruBlockCache(long maxSize, long blockSize) { */ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, - DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, - DEFAULT_SINGLE_FACTOR, - DEFAULT_MULTI_FACTOR, - DEFAULT_MEMORY_FACTOR, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, - false, - DEFAULT_MAX_BLOCK_SIZE, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); + (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, + DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, + DEFAULT_SINGLE_FACTOR, + DEFAULT_MULTI_FACTOR, + DEFAULT_MEMORY_FACTOR, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, + false, + DEFAULT_MAX_BLOCK_SIZE); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, - DEFAULT_CONCURRENCY_LEVEL, - conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), - conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), - conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), - conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), - conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), - conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), - conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), - conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), - conf.getFloat(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); + (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, + DEFAULT_CONCURRENCY_LEVEL, + conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), + conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), + conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), + conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), + conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -329,17 +291,15 @@ public LruBlockCache(long maxSize, long blockSize, Configuration conf) { * @param memoryFactor percentage of total size for in-memory blocks */ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, - int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, - float minFactor, float acceptableFactor, float singleFactor, - float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, - int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, - float heavyEvictionOverheadCoefficient) { + int mapInitialSize, float mapLoadFactor, int 
mapConcurrencyLevel, + float minFactor, float acceptableFactor, float singleFactor, + float multiFactor, float memoryFactor, float hardLimitFactor, + boolean forceInMemory, long maxBlockSize) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || - singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { + singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { throw new IllegalArgumentException("Single, multi, and memory factors " + - " should be non-negative and total 1.0"); + " should be non-negative and total 1.0"); } if (minFactor >= acceptableFactor) { throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); @@ -370,21 +330,10 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, } else { this.evictionThread = null; } - - // check the bounds - this.heavyEvictionCountLimit = heavyEvictionCountLimit < 0 ? 0 : heavyEvictionCountLimit; - this.heavyEvictionMbSizeLimit = heavyEvictionMbSizeLimit < 1 ? 1 : heavyEvictionMbSizeLimit; - this.cacheDataBlockPercent = 100; - heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 0.1f - ? 1f : heavyEvictionOverheadCoefficient; - heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001f - ? 0.001f : heavyEvictionOverheadCoefficient; - this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, - STAT_THREAD_PERIOD, TimeUnit.SECONDS); + STAT_THREAD_PERIOD, TimeUnit.SECONDS); } @Override @@ -403,11 +352,6 @@ public void setMaxSize(long maxSize) { } } - @VisibleForTesting - public int getCacheDataBlockPercent() { - return cacheDataBlockPercent; - } - /** * The block cached in LRUBlockCache will always be an heap block: on the one side, the heap * access will be more faster then off-heap, the small index block or meta block cached in @@ -448,29 +392,16 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { - - // Some data blocks will not put into BlockCache when eviction rate too much. - // It is good for performance - // (see details: https://issues.apache.org/jira/browse/HBASE-23887) - // How to calculate it can find inside EvictionThread class. - if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { - // It works like filter - blocks which two last digits of offset - // more than we calculate in Eviction Thread will not put into BlockCache - if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { - return; - } - } - if (buf.heapSize() > maxBlockSize) { // If there are a lot of blocks that are too // big this can make the logs way too noisy. 
// So we log 2% if (stats.failInsert() % 50 == 0) { LOG.warn("Trying to cache too large a block " - + cacheKey.getHfileName() + " @ " - + cacheKey.getOffset() - + " is " + buf.heapSize() - + " which is larger than " + maxBlockSize); + + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + + " is " + buf.heapSize() + + " which is larger than " + maxBlockSize); } return; } @@ -578,7 +509,7 @@ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { */ @Override public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, - boolean updateCacheMetrics) { + boolean updateCacheMetrics) { LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> { // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside // this block. because if retain outside the map#computeIfPresent, the evictBlock may remove @@ -704,37 +635,28 @@ private void runEviction() { } } - @VisibleForTesting boolean isEvictionInProgress() { return evictionInProgress; } - @VisibleForTesting long getOverhead() { return overhead; } /** * Eviction method. - * - * Evict items in order of use, allowing delete items - * which haven't been used for the longest amount of time. - * - * @return how many bytes were freed */ - long evict() { + void evict() { // Ensure only one eviction at a time if (!evictionLock.tryLock()) { - return 0; + return; } - long bytesToFree = 0L; - try { evictionInProgress = true; long currentSize = this.size.get(); - bytesToFree = currentSize - minSize(); + long bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { LOG.trace("Block cache LRU eviction started; Attempting to free " + @@ -743,7 +665,7 @@ long evict() { } if (bytesToFree <= 0) { - return 0; + return; } // Instantiate priority buckets @@ -822,7 +744,7 @@ long evict() { long overflow = bucket.overflow(); if (overflow > 0) { long bucketBytesToFree = - Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); + Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); bytesFreed += bucket.free(bucketBytesToFree); } remainingBuckets--; @@ -843,7 +765,6 @@ long evict() { stats.evict(); evictionInProgress = false; evictionLock.unlock(); - return bytesToFree; } } @@ -1010,10 +931,6 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { enteringRun = true; - long freedSumMb = 0; - int heavyEvictionCount = 0; - int freedDataOverheadPercent = 0; - long startTime = System.currentTimeMillis(); while (this.go) { synchronized (this) { try { @@ -1027,96 +944,12 @@ public void run() { if (cache == null) { break; } - freedSumMb += cache.evict()/1024/1024; - /* - * Sometimes we are reading more data than can fit into BlockCache - * and it is the cause a high rate of evictions. - * This in turn leads to heavy Garbage Collector works. - * So a lot of blocks put into BlockCache but never read, - * but spending a lot of CPU resources. - * Here we will analyze how many bytes were freed and decide - * decide whether the time has come to reduce amount of caching blocks. - * It help avoid put too many blocks into BlockCache - * when evict() works very active and save CPU for other jobs. - * More delails: https://issues.apache.org/jira/browse/HBASE-23887 - */ - - // First of all we have to control how much time - // has passed since previuos evict() was launched - // This is should be almost the same time (+/- 10s) - // because we get comparable volumes of freed bytes each time. 
- // 10s because this is default period to run evict() (see above this.wait) - long stopTime = System.currentTimeMillis(); - if ((stopTime - startTime) > 1000 * 10 - 1) { - // Here we have to calc what situation we have got. - // We have the limit "hbase.lru.cache.heavy.eviction.bytes.size.limit" - // and can calculte overhead on it. - // We will use this information to decide, - // how to change percent of caching blocks. - freedDataOverheadPercent = - (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; - if (freedSumMb > cache.heavyEvictionMbSizeLimit) { - // Now we are in the situation when we are above the limit - // But maybe we are going to ignore it because it will end quite soon - heavyEvictionCount++; - if (heavyEvictionCount > cache.heavyEvictionCountLimit) { - // It is going for a long time and we have to reduce of caching - // blocks now. So we calculate here how many blocks we want to skip. - // It depends on: - // 1. Overhead - if overhead is big we could more aggressive - // reducing amount of caching blocks. - // 2. How fast we want to get the result. If we know that our - // heavy reading for a long time, we don't want to wait and can - // increase the coefficient and get good performance quite soon. - // But if we don't sure we can do it slowly and it could prevent - // premature exit from this mode. So, when the coefficient is - // higher we can get better performance when heavy reading is stable. - // But when reading is changing we can adjust to it and set - // the coefficient to lower value. - int change = - (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); - // But practice shows that 15% of reducing is quite enough. - // We are not greedy (it could lead to premature exit). - change = Math.min(15, change); - change = Math.max(0, change); // I think it will never happen but check for sure - // So this is the key point, here we are reducing % of caching blocks - cache.cacheDataBlockPercent -= change; - // If we go down too deep we have to stop here, 1% any way should be. - cache.cacheDataBlockPercent = Math.max(1, cache.cacheDataBlockPercent); - } - } else { - // Well, we have got overshooting. - // Mayby it is just short-term fluctuation and we can stay in this mode. - // It help avoid permature exit during short-term fluctuation. - // If overshooting less than 90%, we will try to increase the percent of - // caching blocks and hope it is enough. - if (freedSumMb >= cache.heavyEvictionMbSizeLimit * 0.1) { - // Simple logic: more overshooting - more caching blocks (backpressure) - int change = (int) (-freedDataOverheadPercent * 0.1 + 1); - cache.cacheDataBlockPercent += change; - // But it can't be more then 100%, so check it. - cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); - } else { - // Looks like heavy reading is over. - // Just exit form this mode. 
- heavyEvictionCount = 0; - cache.cacheDataBlockPercent = 100; - } - } - LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + - "heavy eviction counter: {}, " + - "current caching DataBlock (%): {}", - freedSumMb, freedDataOverheadPercent, - heavyEvictionCount, cache.cacheDataBlockPercent); - - freedSumMb = 0; - startTime = stopTime; - } + cache.evict(); } } @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="This is what we want") + justification="This is what we want") public void evict() { synchronized (this) { this.notifyAll(); @@ -1160,20 +993,20 @@ public void logStats() { long totalSize = heapSize(); long freeSize = maxSize - totalSize; LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "max=" + StringUtils.byteDesc(this.maxSize) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount() + ", " + - "evictedPerRun=" + stats.evictedPerEviction()); + "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + + "max=" + StringUtils.byteDesc(this.maxSize) + ", " + + "blockCount=" + getBlockCount() + ", " + + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + + "hitRatio=" + (stats.getHitCount() == 0 ? + "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + + "cachingHits=" + stats.getHitCachingCount() + ", " + + "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? + "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + + "evicted=" + stats.getEvictedCount() + ", " + + "evictedPerRun=" + stats.evictedPerEviction()); } /** @@ -1188,7 +1021,7 @@ public CacheStats getStats() { } public final static long CACHE_FIXED_OVERHEAD = - ClassSize.estimateBase(LruBlockCache.class, false); + ClassSize.estimateBase(LruBlockCache.class, false); @Override public long heapSize() { @@ -1198,8 +1031,8 @@ public long heapSize() { private static long calculateOverhead(long maxSize, long blockSize, int concurrency) { // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP - + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) - + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); + + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) + + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); } @Override @@ -1337,7 +1170,6 @@ public void shutdown() { } /** Clears the cache. Used in tests. 
*/ - @VisibleForTesting public void clearCache() { this.map.clear(); this.elements.set(0); @@ -1348,7 +1180,6 @@ public void clearCache() { * * @return the set of cached file names */ - @VisibleForTesting SortedSet getCachedFileNamesForTest() { SortedSet fileNames = new TreeSet<>(); for (BlockCacheKey cacheKey : map.keySet()) { @@ -1357,7 +1188,6 @@ SortedSet getCachedFileNamesForTest() { return fileNames; } - @VisibleForTesting public Map getEncodingCountsForTest() { Map counts = new EnumMap<>(DataBlockEncoding.class); for (LruCachedBlock block : map.values()) { @@ -1368,7 +1198,6 @@ public Map getEncodingCountsForTest() { return counts; } - @VisibleForTesting Map getMapForTests() { return map; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java new file mode 100644 index 000000000000..fa2f9afed5c2 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java @@ -0,0 +1,1124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.nio.ByteBuffer; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.hfile.AdaptiveLruBlockCache.EvictionThread; +import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.hadoop.hbase.testclassification.IOTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.ClassSize; +import org.junit.Assert; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Tests the concurrent AdaptiveLruBlockCache.

+ *
+ * Tests will ensure it grows and shrinks in size properly,
+ * evictions run when they're supposed to and do what they should,
+ * and that cached blocks are accessible when expected to be.
+ */
+@Category({IOTests.class, SmallTests.class})
+public class TestAdaptiveLruBlockCache {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestAdaptiveLruBlockCache.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestAdaptiveLruBlockCache.class);
+
+  @Test
+  public void testCacheEvictionThreadSafe() throws Exception {
+    long maxSize = 100000;
+    int numBlocks = 9;
+    int testRuns = 10;
+    final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
+    assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
+
+    final Configuration conf = HBaseConfiguration.create();
+    final AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize);
+    EvictionThread evictionThread = cache.getEvictionThread();
+    assertTrue(evictionThread != null);
+    while (!evictionThread.isEnteringRun()) {
+      Thread.sleep(1);
+    }
+    final String hfileName = "hfile";
+    int threads = 10;
+    final int blocksPerThread = 5 * numBlocks;
+    for (int run = 0; run != testRuns; ++run) {
+      final AtomicInteger blockCount = new AtomicInteger(0);
+      ExecutorService service = Executors.newFixedThreadPool(threads);
+      for (int i = 0; i != threads; ++i) {
+        service.execute(new Runnable() {
+          @Override
+          public void run() {
+            for (int blockIndex = 0;
+                blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) {
+              CachedItem block =
+                new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement());
+              boolean inMemory = Math.random() > 0.5;
+              cache.cacheBlock(block.cacheKey, block, inMemory);
+            }
+            cache.evictBlocksByHfileName(hfileName);
+          }
+        });
+      }
+      service.shutdown();
+      // The test may fail here if the evict thread frees the blocks too fast
+      service.awaitTermination(10, TimeUnit.MINUTES);
+      Waiter.waitFor(conf, 10000, 100, new ExplainingPredicate<Exception>() {
+        @Override
+        public boolean evaluate() throws Exception {
+          return cache.getBlockCount() == 0;
+        }
+
+        @Override
+        public String explainFailure() throws Exception {
+          return "Cache block count failed to return to 0";
+        }
+      });
+      assertEquals(0, cache.getBlockCount());
+      assertEquals(cache.getOverhead(), cache.getCurrentSize());
+    }
+  }
+  @Test
+  public void testBackgroundEvictionThread() throws Exception {
+    long maxSize = 100000;
+    int numBlocks = 9;
+    long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
+    assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
+
+    AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize);
+    EvictionThread evictionThread = cache.getEvictionThread();
+    assertTrue(evictionThread != null);
+
+    CachedItem[] blocks = generateFixedBlocks(numBlocks + 1, blockSize, "block");
+
+    // Make sure eviction thread has entered run method
+    while (!evictionThread.isEnteringRun()) {
+      Thread.sleep(1);
+    }
+
+    // Add all the blocks
+    for (CachedItem block : blocks) {
+      cache.cacheBlock(block.cacheKey, block);
+    }
+
+    // wait until at least one eviction has run
+    int n = 0;
+    while (cache.getStats().getEvictionCount() == 0) {
+      Thread.sleep(200);
+      assertTrue("Eviction never happened.", n++ < 20);
+    }
+
+    // let cache stabilize
+    // On some systems, the cache will run multiple evictions before it attains
+    // steady-state.
For instance, after populating the cache with 10 blocks, + // the first eviction evicts a single block and then a second eviction + // evicts another. I think this is due to the delta between minSize and + // acceptableSize, combined with variance between object overhead on + // different environments. + n = 0; + for (long prevCnt = 0 /* < number of blocks added */, + curCnt = cache.getBlockCount(); + prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { + Thread.sleep(200); + assertTrue("Cache never stabilized.", n++ < 20); + } + + long evictionCount = cache.getStats().getEvictionCount(); + assertTrue(evictionCount >= 1); + System.out.println("Background Evictions run: " + evictionCount); + } + + @Test + public void testCacheSimple() throws Exception { + + long maxSize = 1000000; + long blockSize = calculateBlockSizeDefault(maxSize, 101); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize); + + CachedItem [] blocks = generateRandomBlocks(100, blockSize); + + long expectedCacheSize = cache.heapSize(); + + // Confirm empty + for (CachedItem block : blocks) { + assertTrue(cache.getBlock(block.cacheKey, true, false, true) == null); + } + + // Add blocks + for (CachedItem block : blocks) { + cache.cacheBlock(block.cacheKey, block); + expectedCacheSize += block.cacheBlockHeapSize(); + } + + // Verify correctly calculated cache heap size + assertEquals(expectedCacheSize, cache.heapSize()); + + // Check if all blocks are properly cached and retrieved + for (CachedItem block : blocks) { + HeapSize buf = cache.getBlock(block.cacheKey, true, false, true); + assertTrue(buf != null); + assertEquals(buf.heapSize(), block.heapSize()); + } + + // Re-add same blocks and ensure nothing has changed + long expectedBlockCount = cache.getBlockCount(); + for (CachedItem block : blocks) { + cache.cacheBlock(block.cacheKey, block); + } + assertEquals( + "Cache should ignore cache requests for blocks already in cache", + expectedBlockCount, cache.getBlockCount()); + + // Verify correctly calculated cache heap size + assertEquals(expectedCacheSize, cache.heapSize()); + + // Check if all blocks are properly cached and retrieved + for (CachedItem block : blocks) { + HeapSize buf = cache.getBlock(block.cacheKey, true, false, true); + assertTrue(buf != null); + assertEquals(buf.heapSize(), block.heapSize()); + } + + // Expect no evictions + assertEquals(0, cache.getStats().getEvictionCount()); + Thread t = new AdaptiveLruBlockCache.StatisticsThread(cache); + t.start(); + t.join(); + } + + @Test + public void testCacheEvictionSimple() throws Exception { + + long maxSize = 100000; + long blockSize = calculateBlockSizeDefault(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize,blockSize,false); + + CachedItem [] blocks = generateFixedBlocks(10, blockSize, "block"); + + long expectedCacheSize = cache.heapSize(); + + // Add all the blocks + for (CachedItem block : blocks) { + cache.cacheBlock(block.cacheKey, block); + expectedCacheSize += block.cacheBlockHeapSize(); + } + + // A single eviction run should have occurred + assertEquals(1, cache.getStats().getEvictionCount()); + + // Our expected size overruns acceptable limit + assertTrue(expectedCacheSize > + (maxSize * AdaptiveLruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + + // But the cache did not grow beyond max + assertTrue(cache.heapSize() < maxSize); + + // And is still below the acceptable limit + assertTrue(cache.heapSize() < + (maxSize * AdaptiveLruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + + 
// All blocks except block 0 should be in the cache + assertTrue(cache.getBlock(blocks[0].cacheKey, true, false, true) == null); + for(int i=1;i + (maxSize * AdaptiveLruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + + // But the cache did not grow beyond max + assertTrue(cache.heapSize() <= maxSize); + + // And is now below the acceptable limit + assertTrue(cache.heapSize() <= + (maxSize * AdaptiveLruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + + // We expect fairness across the two priorities. + // This test makes multi go barely over its limit, in-memory + // empty, and the rest in single. Two single evictions and + // one multi eviction expected. + assertTrue(cache.getBlock(singleBlocks[0].cacheKey, true, false, true) == null); + assertTrue(cache.getBlock(multiBlocks[0].cacheKey, true, false, true) == null); + + // And all others to be cached + for(int i=1;i<4;i++) { + assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true, false, true), + singleBlocks[i]); + assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true, false, true), + multiBlocks[i]); + } + } + + @Test + public void testCacheEvictionThreePriorities() throws Exception { + + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.98f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 16 * 1024 * 1024, + 10, + 500, + 0.01f); + + CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); + CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); + CachedItem [] memoryBlocks = generateFixedBlocks(5, blockSize, "memory"); + + long expectedCacheSize = cache.heapSize(); + + // Add 3 blocks from each priority + for(int i=0;i<3;i++) { + + // Just add single blocks + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + expectedCacheSize += singleBlocks[i].cacheBlockHeapSize(); + + // Add and get multi blocks + cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]); + expectedCacheSize += multiBlocks[i].cacheBlockHeapSize(); + cache.getBlock(multiBlocks[i].cacheKey, true, false, true); + + // Add memory blocks as such + cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true); + expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize(); + + } + + // Do not expect any evictions yet + assertEquals(0, cache.getStats().getEvictionCount()); + + // Verify cache size + assertEquals(expectedCacheSize, cache.heapSize()); + + // Insert a single block, oldest single should be evicted + cache.cacheBlock(singleBlocks[3].cacheKey, singleBlocks[3]); + + // Single eviction, one thing evicted + assertEquals(1, cache.getStats().getEvictionCount()); + assertEquals(1, cache.getStats().getEvictedCount()); + + // Verify oldest single block is the one evicted + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); + + // Change the oldest remaining single block to a multi + cache.getBlock(singleBlocks[1].cacheKey, true, false, true); + + // Insert another single block + cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]); + + // Two evictions, two evicted. 
+ assertEquals(2, cache.getStats().getEvictionCount()); + assertEquals(2, cache.getStats().getEvictedCount()); + + // Oldest multi block should be evicted now + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); + + // Insert another memory block + cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); + + // Three evictions, three evicted. + assertEquals(3, cache.getStats().getEvictionCount()); + assertEquals(3, cache.getStats().getEvictedCount()); + + // Oldest memory block should be evicted now + assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true)); + + // Add a block that is twice as big (should force two evictions) + CachedItem [] bigBlocks = generateFixedBlocks(3, blockSize*3, "big"); + cache.cacheBlock(bigBlocks[0].cacheKey, bigBlocks[0]); + + // Four evictions, six evicted (inserted block 3X size, expect +3 evicted) + assertEquals(4, cache.getStats().getEvictionCount()); + assertEquals(6, cache.getStats().getEvictedCount()); + + // Expect three remaining singles to be evicted + assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true)); + + // Make the big block a multi block + cache.getBlock(bigBlocks[0].cacheKey, true, false, true); + + // Cache another single big block + cache.cacheBlock(bigBlocks[1].cacheKey, bigBlocks[1]); + + // Five evictions, nine evicted (3 new) + assertEquals(5, cache.getStats().getEvictionCount()); + assertEquals(9, cache.getStats().getEvictedCount()); + + // Expect three remaining multis to be evicted + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true)); + + // Cache a big memory block + cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true); + + // Six evictions, twelve evicted (3 new) + assertEquals(6, cache.getStats().getEvictionCount()); + assertEquals(12, cache.getStats().getEvictedCount()); + + // Expect three remaining in-memory to be evicted + assertEquals(null, cache.getBlock(memoryBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[3].cacheKey, true, false, true)); + } + + @Test + public void testCacheEvictionInMemoryForceMode() throws Exception { + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.98f, // min + 0.99f, // acceptable + 0.2f, // single + 0.3f, // multi + 0.5f, // memory + 1.2f, // limit + true, + 16 * 1024 * 1024, + 10, + 500, + 0.01f); + + CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); + CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); + CachedItem [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); + + long expectedCacheSize = cache.heapSize(); + + // 0. 
Add 5 single blocks and 4 multi blocks to make cache full, si:mu:me = 5:4:0 + for(int i = 0; i < 4; i++) { + // Just add single blocks + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + expectedCacheSize += singleBlocks[i].cacheBlockHeapSize(); + // Add and get multi blocks + cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]); + expectedCacheSize += multiBlocks[i].cacheBlockHeapSize(); + cache.getBlock(multiBlocks[i].cacheKey, true, false, true); + } + // 5th single block + cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]); + expectedCacheSize += singleBlocks[4].cacheBlockHeapSize(); + // Do not expect any evictions yet + assertEquals(0, cache.getStats().getEvictionCount()); + // Verify cache size + assertEquals(expectedCacheSize, cache.heapSize()); + + // 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1 + cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true); + // Single eviction, one block evicted + assertEquals(1, cache.getStats().getEvictionCount()); + assertEquals(1, cache.getStats().getEvictedCount()); + // Verify oldest single block (index = 0) is the one evicted + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); + + // 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2 + cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true); + // Two evictions, two evicted. + assertEquals(2, cache.getStats().getEvictionCount()); + assertEquals(2, cache.getStats().getEvictedCount()); + // Current oldest single block (index = 1) should be evicted now + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); + + // 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6 + cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true); + cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); + cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true); + cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true); + // Three evictions, three evicted. + assertEquals(6, cache.getStats().getEvictionCount()); + assertEquals(6, cache.getStats().getEvictedCount()); + // two oldest single blocks and two oldest multi blocks evicted + assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); + + // 4. Insert 3 memory blocks, the remaining 1 single and 2 multi evicted + // si:mu:me = 0:0:9 + cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true); + cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true); + cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true); + // Three evictions, three evicted. + assertEquals(9, cache.getStats().getEvictionCount()); + assertEquals(9, cache.getStats().getEvictedCount()); + // one oldest single block and two oldest multi blocks evicted + assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[3].cacheKey, true, false, true)); + + // 5. Insert one memory block, the oldest memory evicted + // si:mu:me = 0:0:9 + cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true); + // one eviction, one evicted. 
+ assertEquals(10, cache.getStats().getEvictionCount()); + assertEquals(10, cache.getStats().getEvictedCount()); + // oldest memory block evicted + assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true)); + + // 6. Insert one new single block, itself evicted immediately since + // all blocks in cache are memory-type which have higher priority + // si:mu:me = 0:0:9 (no change) + cache.cacheBlock(singleBlocks[9].cacheKey, singleBlocks[9]); + // one eviction, one evicted. + assertEquals(11, cache.getStats().getEvictionCount()); + assertEquals(11, cache.getStats().getEvictedCount()); + // the single block just cached now evicted (can't evict memory) + assertEquals(null, cache.getBlock(singleBlocks[9].cacheKey, true, false, true)); + } + + // test scan resistance + @Test + public void testScanResistance() throws Exception { + + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 16 * 1024 * 1024, + 10, + 500, + 0.01f); + + CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); + CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); + + // Add 5 multi blocks + for (CachedItem block : multiBlocks) { + cache.cacheBlock(block.cacheKey, block); + cache.getBlock(block.cacheKey, true, false, true); + } + + // Add 5 single blocks + for(int i=0;i<5;i++) { + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + } + + // An eviction ran + assertEquals(1, cache.getStats().getEvictionCount()); + + // To drop down to 2/3 capacity, we'll need to evict 4 blocks + assertEquals(4, cache.getStats().getEvictedCount()); + + // Should have been taken off equally from single and multi + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); + + // Let's keep "scanning" by adding single blocks. From here on we only + // expect evictions from the single bucket. + + // Every time we reach 10 total blocks (every 4 inserts) we get 4 single + // blocks evicted. Inserting 13 blocks should yield 3 more evictions and + // 12 more evicted. 
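+    // (As a sanity check on the arithmetic, assuming the counts above: 6 blocks
+    // survived the first eviction, so after 13 more inserts and 12 more evictions
+    // we expect 1 + 3 = 4 evictions, 4 + 12 = 16 evicted, and 6 + 13 - 12 = 7
+    // blocks remaining, which is what the assertions below verify.)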
+ + for(int i=5;i<18;i++) { + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + } + + // 4 total evictions, 16 total evicted + assertEquals(4, cache.getStats().getEvictionCount()); + assertEquals(16, cache.getStats().getEvictedCount()); + + // Should now have 7 total blocks + assertEquals(7, cache.getBlockCount()); + + } + + @Test + public void testMaxBlockSize() throws Exception { + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 1024, + 10, + 500, + 0.01f); + + CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); + CachedItem [] small = generateFixedBlocks(15, 600, "small"); + + + for (CachedItem i:tooLong) { + cache.cacheBlock(i.cacheKey, i); + } + for (CachedItem i:small) { + cache.cacheBlock(i.cacheKey, i); + } + assertEquals(15,cache.getBlockCount()); + for (CachedItem i:small) { + assertNotNull(cache.getBlock(i.cacheKey, true, false, false)); + } + for (CachedItem i:tooLong) { + assertNull(cache.getBlock(i.cacheKey, true, false, false)); + } + + assertEquals(10, cache.getStats().getFailedInserts()); + } + + // test setMaxSize + @Test + public void testResizeBlockCache() throws Exception { + + long maxSize = 300000; + long blockSize = calculateBlockSize(maxSize, 31); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.98f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 16 * 1024 * 1024, + 10, + 500, + 0.01f); + + CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); + CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); + CachedItem [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); + + // Add all blocks from all priorities + for(int i=0;i<10;i++) { + + // Just add single blocks + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + + // Add and get multi blocks + cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]); + cache.getBlock(multiBlocks[i].cacheKey, true, false, true); + + // Add memory blocks as such + cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true); + } + + // Do not expect any evictions yet + assertEquals(0, cache.getStats().getEvictionCount()); + + // Resize to half capacity plus an extra block (otherwise we evict an extra) + cache.setMaxSize((long)(maxSize * 0.5f)); + + // Should have run a single eviction + assertEquals(1, cache.getStats().getEvictionCount()); + + // And we expect 1/2 of the blocks to be evicted + assertEquals(15, cache.getStats().getEvictedCount()); + + // And the oldest 5 blocks from each category should be gone + for(int i=0;i<5;i++) { + assertEquals(null, cache.getBlock(singleBlocks[i].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[i].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[i].cacheKey, true, false, true)); + } + + // And the newest 5 blocks should still be accessible + for(int i=5;i<10;i++) { + assertEquals(singleBlocks[i], 
cache.getBlock(singleBlocks[i].cacheKey, true, false, true)); + assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].cacheKey, true, false, true)); + assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].cacheKey, true, false, true)); + } + } + + // test metricsPastNPeriods + @Test + public void testPastNPeriodsMetrics() throws Exception { + double delta = 0.01; + + // 3 total periods + CacheStats stats = new CacheStats("test", 3); + + // No accesses, should be 0 + stats.rollMetricsPeriod(); + assertEquals(0.0, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 1, 1 hit caching, 1 hit non-caching, 2 miss non-caching + // should be (2/4)=0.5 and (1/1)=1 + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); + assertEquals(1.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 2, 1 miss caching, 3 miss non-caching + // should be (2/8)=0.25 and (1/2)=0.5 + stats.miss(true, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.25, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 3, 2 hits of each type + // should be (6/12)=0.5 and (3/4)=0.75 + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.75, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 4, evict period 1, two caching misses + // should be (4/10)=0.4 and (2/5)=0.4 + stats.miss(true, false, BlockType.DATA); + stats.miss(true, false, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.4, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.4, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 5, evict period 2, 2 caching misses, 2 non-caching hit + // should be (6/10)=0.6 and (2/6)=1/3 + stats.miss(true, false, BlockType.DATA); + stats.miss(true, false, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.6, stats.getHitRatioPastNPeriods(), delta); + assertEquals((double)1/3, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 6, evict period 3 + // should be (2/6)=1/3 and (0/4)=0 + stats.rollMetricsPeriod(); + assertEquals((double)1/3, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 7, evict period 4 + // should be (2/4)=0.5 and (0/2)=0 + stats.rollMetricsPeriod(); + assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 8, evict period 5 + // should be 0 and 0 + stats.rollMetricsPeriod(); + assertEquals(0.0, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 9, one of each + // should be (2/4)=0.5 and (1/2)=0.5 + stats.miss(true, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.hit(true, true, 
BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta); + } + + @Test + public void testCacheBlockNextBlockMetadataMissing() { + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + int size = 100; + int length = HConstants.HFILEBLOCK_HEADER_SIZE + size; + byte[] byteArr = new byte[length]; + ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size); + HFileContext meta = new HFileContextBuilder().build(); + HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, + ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, -1, meta, HEAP); + HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, + ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, HEAP); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 1024, + 10, + 500, + 0.01f); + + BlockCacheKey key = new BlockCacheKey("key1", 0); + ByteBuffer actualBuffer = ByteBuffer.allocate(length); + ByteBuffer block1Buffer = ByteBuffer.allocate(length); + ByteBuffer block2Buffer = ByteBuffer.allocate(length); + blockWithNextBlockMetadata.serialize(block1Buffer, true); + blockWithoutNextBlockMetadata.serialize(block2Buffer, true); + + //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back. + CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, + block1Buffer); + + //Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back. + CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, + block1Buffer); + + //Clear and add blockWithoutNextBlockMetadata + cache.clearCache(); + assertNull(cache.getBlock(key, false, false, false)); + CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, + block2Buffer); + + //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace. 
+ CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, + block1Buffer); + } + + private CachedItem [] generateFixedBlocks(int numBlocks, int size, String pfx) { + CachedItem [] blocks = new CachedItem[numBlocks]; + for(int i=0;i getDeserializer() { + return null; + } + + @Override + public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) { + } + + @Override + public BlockType getBlockType() { + return BlockType.DATA; + } + } + + static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exception { + int size = 100; + int length = HConstants.HFILEBLOCK_HEADER_SIZE + size; + byte[] byteArr = new byte[length]; + HFileContext meta = new HFileContextBuilder().build(); + BlockCacheKey key = new BlockCacheKey("key1", 0); + HFileBlock blk = new HFileBlock(BlockType.DATA, size, size, -1, + ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, 52, -1, meta, + HEAP); + AtomicBoolean err1 = new AtomicBoolean(false); + Thread t1 = new Thread(() -> { + for (int i = 0; i < 10000 && !err1.get(); i++) { + try { + cache.getBlock(key, false, false, true); + } catch (Exception e) { + err1.set(true); + LOG.info("Cache block or get block failure: ", e); + } + } + }); + + AtomicBoolean err2 = new AtomicBoolean(false); + Thread t2 = new Thread(() -> { + for (int i = 0; i < 10000 && !err2.get(); i++) { + try { + cache.evictBlock(key); + } catch (Exception e) { + err2.set(true); + LOG.info("Evict block failure: ", e); + } + } + }); + + AtomicBoolean err3 = new AtomicBoolean(false); + Thread t3 = new Thread(() -> { + for (int i = 0; i < 10000 && !err3.get(); i++) { + try { + cache.cacheBlock(key, blk); + } catch (Exception e) { + err3.set(true); + LOG.info("Cache block failure: ", e); + } + } + }); + t1.start(); + t2.start(); + t3.start(); + t1.join(); + t2.join(); + t3.join(); + Assert.assertFalse(err1.get()); + Assert.assertFalse(err2.get()); + Assert.assertFalse(err3.get()); + } + + @Test + public void testMultiThreadGetAndEvictBlock() throws Exception { + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + AdaptiveLruBlockCache cache = + new AdaptiveLruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 1024, + 10, + 500, + 0.01f); + testMultiThreadGetAndEvictBlockInternal(cache); + } + + public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { + long maxSize = 100000000; + int numBlocks = 100000; + final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); + assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + + final AdaptiveLruBlockCache cache = + new AdaptiveLruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.5f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + maxSize, + heavyEvictionCountLimit, + 200, + 0.01f); + + EvictionThread evictionThread = cache.getEvictionThread(); + assertTrue(evictionThread != null); + while (!evictionThread.isEnteringRun()) { + Thread.sleep(1); + } + + final String hfileName = "hfile"; + for (int blockIndex = 0; blockIndex <= numBlocks * 
3000; ++blockIndex) { + CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); + cache.cacheBlock(block.cacheKey, block, false); + if (cache.getCacheDataBlockPercent() < 70) { + // enough for the test + break; + } + } + + evictionThread.evict(); + Thread.sleep(100); + + if (heavyEvictionCountLimit == 0) { + // Check that the offset (last two digits) of every cached block is below the percent. + // It means some blocks were never put into the BlockCache. + assertTrue(cache.getCacheDataBlockPercent() < 90); + for (BlockCacheKey key : cache.getMapForTests().keySet()) { + assertTrue(!(key.getOffset() % 100 > 90)); + } + } else { + // Check that auto-scaling is not active (all blocks are in the BlockCache) + assertTrue(cache.getCacheDataBlockPercent() == 100); + int counter = 0; + for (BlockCacheKey key : cache.getMapForTests().keySet()) { + if (key.getOffset() % 100 > 90) { + counter++; + } + } + assertTrue(counter > 1000); + } + evictionThread.shutdown(); + } + + @Test + public void testSkipCacheDataBlocks() throws Exception { + // Check that auto-scaling works right after start + testSkipCacheDataBlocksInteral(0); + // Check that auto-scaling does not engage right after start + // (caching has to finish before auto-scaling kicks in) + testSkipCacheDataBlocksInteral(100); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index b30575ebc55c..afaf85f5b2a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -61,7 +61,7 @@ public class TestLruBlockCache { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLruBlockCache.class); + HBaseClassTestRule.forClass(TestLruBlockCache.class); private static final Logger LOG = LoggerFactory.getLogger(TestLruBlockCache.class); @@ -156,8 +156,8 @@ public void testBackgroundEvictionThread() throws Exception { // different environments.
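To make the offset assertions above concrete: a DATA block is admitted to the cache only when its offset, taken modulo 100, falls below the current percent, so once the percent drops under 90 no cached key may have an offset whose last two digits exceed 90. A minimal sketch of that gate, with an assumed method name (this is not the actual HBase signature):

  // Illustrative sketch only. Offsets are effectively uniform modulo 100, so this
  // admits roughly cacheDataBlockPercent percent of DATA blocks and deterministically
  // skips the rest without caching them.
  final class DataBlockGateSketch {
    static boolean shouldCacheDataBlock(long offset, int cacheDataBlockPercent) {
      return offset % 100 < cacheDataBlockPercent;
    }
  }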
n = 0; for (long prevCnt = 0 /* < number of blocks added */, - curCnt = cache.getBlockCount(); - prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { + curCnt = cache.getBlockCount(); + prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { Thread.sleep(200); assertTrue("Cache never stabilized.", n++ < 20); } @@ -206,8 +206,8 @@ public void testCacheSimple() throws Exception { cache.cacheBlock(block.cacheKey, block); } assertEquals( - "Cache should ignore cache requests for blocks already in cache", - expectedBlockCount, cache.getBlockCount()); + "Cache should ignore cache requests for blocks already in cache", + expectedBlockCount, cache.getBlockCount()); // Verify correctly calculated cache heap size assertEquals(expectedCacheSize, cache.heapSize()); @@ -256,13 +256,13 @@ public void testCacheEvictionSimple() throws Exception { // And is still below the acceptable limit assertTrue(cache.heapSize() < - (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); // All blocks except block 0 should be in the cache assertTrue(cache.getBlock(blocks[0].cacheKey, true, false, true) == null); for(int i=1;i { for (int i = 0; i < 10000 && !err1.get(); i++) { @@ -1037,89 +1018,15 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); LruBlockCache cache = - new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, 1024, - 10, - 500, - 0.01f); - testMultiThreadGetAndEvictBlockInternal(cache); - } - - public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { - long maxSize = 100000000; - int numBlocks = 100000; - final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); - assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); - - final LruBlockCache cache = - new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.5f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - maxSize, - heavyEvictionCountLimit, - 200, - 0.01f); - - EvictionThread evictionThread = cache.getEvictionThread(); - assertTrue(evictionThread != null); - while (!evictionThread.isEnteringRun()) { - Thread.sleep(1); - } - - final String hfileName = "hfile"; - for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { - CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); - cache.cacheBlock(block.cacheKey, block, false); - if (cache.getCacheDataBlockPercent() < 70) { - // enough for test - break; - } - } - - evictionThread.evict(); - Thread.sleep(100); - - if (heavyEvictionCountLimit == 0) { - // Check if all offset (last two digits) of cached blocks less than the percent. 
- // It means some of blocks haven't put into BlockCache - assertTrue(cache.getCacheDataBlockPercent() < 90); - for (BlockCacheKey key : cache.getMapForTests().keySet()) { - assertTrue(!(key.getOffset() % 100 > 90)); - } - } else { - // Check that auto-scaling is not working (all blocks in BlockCache) - assertTrue(cache.getCacheDataBlockPercent() == 100); - int counter = 0; - for (BlockCacheKey key : cache.getMapForTests().keySet()) { - if (key.getOffset() % 100 > 90) { - counter++; - } - } - assertTrue(counter > 1000); - } - evictionThread.shutdown(); - } - - @Test - public void testSkipCacheDataBlocks() throws Exception { - // Check that auto-scaling will work right after start - testSkipCacheDataBlocksInteral(0); - // Check that auto-scaling will not work right after start - // (have to finished before auto-scaling) - testSkipCacheDataBlocksInteral(100); + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 1024); + testMultiThreadGetAndEvictBlockInternal(cache); } - } From 8fdac16d2ebbe5a3f167a901a69c6a245275e79a Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:12:05 +0300 Subject: [PATCH 361/769] Added AdaptiveLruBlockCache --- .../apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index a57464bb1d6d..a72e86b132a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -134,8 +134,8 @@ * can adjust to it and set the coefficient to lower value. * For example, we set the coefficient = 0.01. It means the overhead (see above) will be * multiplied by 0.01 and the result is the value of reducing percent caching blocks. For example, - * if the overhead = 300% and the coefficient = 0.01, - * then percent of caching blocks will reduce by 3%. + * if the overhead = 300% and the coefficient = 0.01, * then percent of caching blocks will + * reduce by 3%. * Similar logic when overhead has got negative value (overshooting). Maybe it is just short-term * fluctuation and we will try to stay in this mode. It helps avoid premature exit during * short-term fluctuation. Backpressure has simple logic: more overshooting - more caching blocks. From f9dfbd53923d647b64648cbc4606e83ac22e14c5 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 18 Sep 2020 13:20:44 -0700 Subject: [PATCH 362/769] HBASE-25061 Update default URL to KEYS file in `hbase-vote.sh` (#2416) Co-authored-by: Viraj Jasani Signed-off-by: Sean Busbey Signed-off-by: Jan Hentschel Signed-off-by: Viraj Jasani --- dev-support/hbase-vote.sh | 4 ++-- src/main/asciidoc/_chapters/developer.adoc | 2 +- src/site/xdoc/downloads.xml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/hbase-vote.sh b/dev-support/hbase-vote.sh index ec9340a0b0e1..88e22849a92f 100755 --- a/dev-support/hbase-vote.sh +++ b/dev-support/hbase-vote.sh @@ -37,7 +37,7 @@ Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file- e.g. https://dist.apache.org/repos/dist/dev/hbase/hbase-RC0/ -k | --key '' A signature of the public key, e.g. 
9AD2AE49 -f | --keys-file-url '' the URL of the key file, default is - http://www.apache.org/dist/hbase/KEYS + https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target -P | list of maven profiles to activate for test UT/IT, i.e. <-P runSmallTests> Defaults to runAllTests __EOF @@ -103,7 +103,7 @@ BUILD_FROM_SOURCE_PASSED=0 UNIT_TEST_PASSED=0 function download_and_import_keys() { - KEY_FILE_URL="${KEY_FILE_URL:-https://www.apache.org/dist/hbase/KEYS}" + KEY_FILE_URL="${KEY_FILE_URL:-https://downloads.apache.org/hbase/KEYS}" echo "Obtain and import the publisher key(s) from ${KEY_FILE_URL}" # download the keys file into file KEYS wget -O KEYS "${KEY_FILE_URL}" diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index 6987ffd6b637..27c369255459 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -1160,7 +1160,7 @@ Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-f e.g. https://dist.apache.org/repos/dist/dev/hbase/hbase-RC0/ -k | --key '' A signature of the public key, e.g. 9AD2AE49 -f | --keys-file-url '' the URL of the key file, default is - http://www.apache.org/dist/hbase/KEYS + https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target ---- * If you see any unit test failures, please call out the solo test result and whether it's part of flaky (nightly) tests dashboard, e.g. link:https://builds.apache.org/view/H-L/view/HBase/job/HBase-Find-Flaky-Tests/job/master/lastSuccessfulBuild/artifact/dashboard.html[dashboard of master branch] (please change the test branch accordingly). diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 822a819b31d7..c49f09a9177e 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -28,7 +28,7 @@ under the License.

    The below table lists mirrored release artifacts and their associated hashes and signatures available ONLY at apache.org. The keys used to sign releases can be found in our published - <a href="https://www.apache.org/dist/hbase/KEYS">KEYS</a> file. See + <a href="https://downloads.apache.org/hbase/KEYS">KEYS</a> file. See Verify The Integrity Of The Files for how to verify your mirrored downloads.

    From 282b2b7195d16e920ab9c06b60510355f9c050ab Mon Sep 17 00:00:00 2001 From: GeorryHuang <215175212@qq.com> Date: Sat, 19 Sep 2020 14:34:30 +0800 Subject: [PATCH 363/769] HBASE-24857:Fix several problems when starting webUI (#2245) Signed-off-by: Viraj Jasani Signed-off-by: Duo Zhang --- bin/hbase | 2 +- .../java/org/apache/hadoop/hbase/tool/CanaryTool.java | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/bin/hbase b/bin/hbase index 127fa3c7fdd8..dd6cfeef644f 100755 --- a/bin/hbase +++ b/bin/hbase @@ -258,7 +258,7 @@ if [ "${INTERNAL_CLASSPATH}" != "true" ]; then # If command needs our shaded mapreduce, use it # N.B "mapredcp" is not included here because in the shaded case it skips our built classpath - declare -a commands_in_mr_jar=("hbck" "snapshot" "canary" "regionsplitter" "pre-upgrade") + declare -a commands_in_mr_jar=("hbck" "snapshot" "regionsplitter" "pre-upgrade") for c in "${commands_in_mr_jar[@]}"; do if [ "${COMMAND}" = "${c}" ]; then # If we didn't find a jar above, this will just be blank and the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index d42e62991596..59e0e6cc7916 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -140,7 +140,7 @@ private void putUpWebUI() throws IOException { try { InfoServer infoServer = new InfoServer("canary", addr, port, false, conf); infoServer.addUnprivilegedServlet("canary", "/canary-status", CanaryStatusServlet.class); - infoServer.setAttribute("sink", this.sink); + infoServer.setAttribute("sink", getSink(conf, RegionStdOutSink.class)); infoServer.start(); LOG.info("Bind Canary http info server to {}:{} ", addr, port); } catch (BindException e) { @@ -979,8 +979,10 @@ public int run(String[] args) throws Exception { monitorTargets = new String[length]; System.arraycopy(args, index, monitorTargets, 0, length); } - - putUpWebUI(); + if (interval > 0) { + //Only show the web page in daemon mode + putUpWebUI(); + } if (zookeeperMode) { return checkZooKeeper(); } else if (regionServerMode) { From f0dfd0e1448d926ee8cff0604483936479d179eb Mon Sep 17 00:00:00 2001 From: Hyeran Lee Date: Sat, 19 Sep 2020 15:36:06 +0900 Subject: [PATCH 364/769] HBASE-25057: Fix typo memeber (#2414) Signed-off-by: Viraj Jasani Signed-off-by: Duo Zhang Signed-off-by: Jan Hentschel --- .../regionserver/LogRollRegionServerProcedureManager.java | 2 +- .../java/org/apache/hadoop/hbase/procedure/Subprocedure.java | 4 ++-- .../flush/RegionServerFlushTableProcedureManager.java | 2 +- .../regionserver/snapshot/RegionServerSnapshotManager.java | 2 +- .../hadoop/hbase/procedure/SimpleRSProcedureManager.java | 2 +- .../apache/hadoop/hbase/procedure/TestProcedureMember.java | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java index 5d087a65f91f..f09e71005598 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -116,7 +116,7 @@ public void stop(boolean force) throws IOException { /** * If in a 
running state, creates the specified subprocedure for handling a backup procedure. - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(byte[] data) { // don't run a backup if the parent is stop(ping) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java index 4b6924438377..9e45ad514369 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java @@ -39,7 +39,7 @@ * member), {@link #insideBarrier()} (execute while globally barriered and release barrier) and * {@link #cleanup(Exception)} (release state associated with subprocedure.) * - * When submitted to a ProcedureMemeber, the call method is executed in a separate thread. + * When submitted to a ProcedureMember, the call method is executed in a separate thread. * Latches are use too block its progress and trigger continuations when barrier conditions are * met. * @@ -147,7 +147,7 @@ private void rethrowException() throws ForeignException { * Execute the Subprocedure {@link #acquireBarrier()} and {@link #insideBarrier()} methods * while keeping some state for other threads to access. * - * This would normally be executed by the ProcedureMemeber when a acquire message comes from the + * This would normally be executed by the ProcedureMember when a acquire message comes from the * coordinator. Rpcs are used to spend message back to the coordinator after different phases * are executed. Any exceptions caught during the execution (except for InterruptedException) get * converted and propagated to coordinator via {@link ProcedureMemberRpcs#sendMemberAborted( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 0a72d9a738a5..1e95d15881fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -132,7 +132,7 @@ public void stop(boolean force) throws IOException { * * @param table * @param family - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(String table, String family) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index cc92003315f2..a01d118718d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -150,7 +150,7 @@ public void stop(boolean force) throws IOException { * the snapshot verification step. * * @param snapshot - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. 
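The Subprocedure javadoc above lays out the member-side lifecycle: prepare locally in acquireBarrier(), block on a latch until the coordinator confirms every member has acquired, then run the in-barrier work and report back. A rough standalone sketch of that flow with hypothetical names (this is not the HBase API):

  import java.util.concurrent.CountDownLatch;

  abstract class TwoPhaseMemberSketch {
    // Counted down once the coordinator observes that every member has acquired.
    protected final CountDownLatch insideBarrierLatch = new CountDownLatch(1);

    abstract void acquireBarrier() throws Exception; // local preparation phase
    abstract void insideBarrier() throws Exception;  // globally barriered phase

    void call() throws Exception {
      acquireBarrier();           // ...then notify the coordinator "acquired"
      insideBarrierLatch.await(); // progress blocks here until the barrier condition is met
      insideBarrier();            // ...then notify the coordinator "completed"
    }
  }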
*/ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index cee0656443b7..9ccee661586a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -85,7 +85,7 @@ public String getProcedureSignature() { /** * If in a running state, creates the specified subprocedure for handling a procedure. - * @return Subprocedure to submit to the ProcedureMemeber. + * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(String name) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java index b95ddf20aad9..61146a6c7070 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java @@ -204,7 +204,7 @@ public void testSendMemberAcquiredCommsFailure() throws Exception { new Answer() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { - throw new IOException("Forced IOException in memeber prepare"); + throw new IOException("Forced IOException in member prepare"); } }).when(mockMemberComms).sendMemberAcquired(any()); @@ -288,7 +288,7 @@ public void testMemberCommitException() throws Exception { new Answer() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { - throw new IOException("Forced IOException in memeber prepare"); + throw new IOException("Forced IOException in member prepare"); } }).when(spySub).insideBarrier(); From fb85c13f3de7aa789a622de46168a13681448cc2 Mon Sep 17 00:00:00 2001 From: Joseph295 <517536891@qq.com> Date: Sat, 19 Sep 2020 14:53:13 +0800 Subject: [PATCH 365/769] HBASE-24991 Replace MovedRegionsCleaner with guava cache (#2357) Signed-off-by: stack Signed-off-by: Guanghao Zhang --- .../hbase/regionserver/HRegionServer.java | 112 +++--------------- .../hadoop/hbase/TestMovedRegionCache.java | 104 ++++++++++++++++ .../hadoop/hbase/TestMovedRegionsCleaner.java | 95 --------------- .../regionserver/TestRSChoresScheduled.java | 7 -- 4 files changed, 118 insertions(+), 200 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionCache.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index fc0e3d75f592..c33be53d4538 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -277,6 +277,13 @@ public class HRegionServer extends Thread implements private final Cache executedRegionProcedures = CacheBuilder.newBuilder().expireAfterAccess(600, TimeUnit.SECONDS).build(); + /** + * Used to cache the moved-out regions + */ + private final Cache movedRegionInfoCache = + CacheBuilder.newBuilder().expireAfterWrite(movedRegionCacheExpiredTime(), + TimeUnit.MILLISECONDS).build(); + private 
MemStoreFlusher cacheFlusher; private HeapMemoryManager hMemManager; @@ -476,11 +483,6 @@ public class HRegionServer extends Thread implements */ protected String clusterId; - /** - * Chore to clean periodically the moved region list - */ - private MovedRegionsCleaner movedRegionsCleaner; - // chore for refreshing store files for secondary regions private StorefileRefresherChore storefileRefresher; @@ -1079,10 +1081,6 @@ public void run() { mobFileCache.shutdown(); } - if (movedRegionsCleaner != null) { - movedRegionsCleaner.stop("Region Server stopping"); - } - // Send interrupts to wake up threads if sleeping so they notice shutdown. // TODO: Should we check they are alive? If OOME could have exited already if (this.hMemManager != null) this.hMemManager.stop(); @@ -2051,9 +2049,6 @@ private void startServices() throws IOException { if (this.storefileRefresher != null) { choreService.scheduleChore(storefileRefresher); } - if (this.movedRegionsCleaner != null) { - choreService.scheduleChore(movedRegionsCleaner); - } if (this.fsUtilizationChore != null) { choreService.scheduleChore(fsUtilizationChore); } @@ -2111,9 +2106,6 @@ private void initializeThreads() { slowLogTableOpsChore = new SlowLogTableOpsChore(this, duration, this.namedQueueRecorder); } - // Create the thread to clean the moved regions list - movedRegionsCleaner = MovedRegionsCleaner.create(this); - if (this.nonceManager != null) { // Create the scheduled chore that cleans up nonces. nonceManagerChore = this.nonceManager.createCleanupScheduledChore(this); @@ -2614,7 +2606,6 @@ protected void stopServiceThreads() { choreService.cancelChore(healthCheckChore); choreService.cancelChore(executorStatusChore); choreService.cancelChore(storefileRefresher); - choreService.cancelChore(movedRegionsCleaner); choreService.cancelChore(fsUtilizationChore); choreService.cancelChore(slowLogTableOpsChore); // clean up the remaining scheduled chores (in case we missed out any) @@ -3485,12 +3476,10 @@ public ServerNonceManager getNonceManager() { private static class MovedRegionInfo { private final ServerName serverName; private final long seqNum; - private final long moveTime; MovedRegionInfo(ServerName serverName, long closeSeqNum) { this.serverName = serverName; this.seqNum = closeSeqNum; - this.moveTime = EnvironmentEdgeManager.currentTime(); } public ServerName getServerName() { @@ -3500,18 +3489,8 @@ public ServerName getServerName() { public long getSeqNum() { return seqNum; } - - long getMoveTime() { - return moveTime; - } } - /** - * This map will contains all the regions that we closed for a move. - * We add the time it was moved as we don't want to keep too old information - */ - private Map movedRegions = new ConcurrentHashMap<>(3000); - /** * We need a timeout. If not there is a risk of giving a wrong information: this would double * the number of network calls instead of reducing them. 
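As a self-contained illustration of the pattern this patch adopts, here is the same idea in stock Guava (HBase itself uses its relocated copy of these classes; the timeout below is a placeholder, not the real TIMEOUT_REGION_MOVED value):

  import java.util.concurrent.TimeUnit;
  import com.google.common.cache.Cache;
  import com.google.common.cache.CacheBuilder;

  public class MovedRegionCacheSketch {
    public static void main(String[] args) {
      // Each entry silently expires a fixed time after it is written, which
      // replaces the periodic cleaner chore the old code had to schedule.
      Cache<String, Long> movedRegions = CacheBuilder.newBuilder()
          .expireAfterWrite(120_000L, TimeUnit.MILLISECONDS) // placeholder timeout
          .build();

      movedRegions.put("encodedRegionName", 42L); // record a move with its close seqnum
      Long seqNum = movedRegions.getIfPresent("encodedRegionName"); // null once expired
      movedRegions.invalidate("encodedRegionName"); // explicit removal when the region reopens
      System.out.println(seqNum);
    }
  }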
@@ -3525,86 +3504,23 @@ private void addToMovedRegions(String encodedName, ServerName destination, long } LOG.info("Adding " + encodedName + " move to " + destination + " record at close sequenceid=" + closeSeqNum); - movedRegions.put(encodedName, new MovedRegionInfo(destination, closeSeqNum)); + movedRegionInfoCache.put(encodedName, new MovedRegionInfo(destination, closeSeqNum)); } void removeFromMovedRegions(String encodedName) { - movedRegions.remove(encodedName); - } - - private MovedRegionInfo getMovedRegion(final String encodedRegionName) { - MovedRegionInfo dest = movedRegions.get(encodedRegionName); - - long now = EnvironmentEdgeManager.currentTime(); - if (dest != null) { - if (dest.getMoveTime() > (now - TIMEOUT_REGION_MOVED)) { - return dest; - } else { - movedRegions.remove(encodedRegionName); - } - } - - return null; + movedRegionInfoCache.invalidate(encodedName); } - /** - * Remove the expired entries from the moved regions list. - */ - protected void cleanMovedRegions() { - final long cutOff = System.currentTimeMillis() - TIMEOUT_REGION_MOVED; - - movedRegions.entrySet().removeIf(e -> e.getValue().getMoveTime() < cutOff); + @VisibleForTesting + public MovedRegionInfo getMovedRegion(String encodedRegionName) { + return movedRegionInfoCache.getIfPresent(encodedRegionName); } - /* - * Use this to allow tests to override and schedule more frequently. - */ - - protected int movedRegionCleanerPeriod() { + @VisibleForTesting + public int movedRegionCacheExpiredTime() { return TIMEOUT_REGION_MOVED; } - /** - * Creates a Chore thread to clean the moved region cache. - */ - protected final static class MovedRegionsCleaner extends ScheduledChore implements Stoppable { - private HRegionServer regionServer; - Stoppable stoppable; - - private MovedRegionsCleaner( - HRegionServer regionServer, Stoppable stoppable){ - super("MovedRegionsCleaner for region " + regionServer, stoppable, - regionServer.movedRegionCleanerPeriod()); - this.regionServer = regionServer; - this.stoppable = stoppable; - } - - static MovedRegionsCleaner create(HRegionServer rs){ - Stoppable stoppable = new Stoppable() { - private volatile boolean isStopped = false; - @Override public void stop(String why) { isStopped = true;} - @Override public boolean isStopped() {return isStopped;} - }; - - return new MovedRegionsCleaner(rs, stoppable); - } - - @Override - protected void chore() { - regionServer.cleanMovedRegions(); - } - - @Override - public void stop(String why) { - stoppable.stop(why); - } - - @Override - public boolean isStopped() { - return stoppable.isStopped(); - } - } - private String getMyEphemeralNodePath() { return ZNodePaths.joinZNode(this.zooKeeper.getZNodePaths().rsZNode, getServerName().toString()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionCache.java new file mode 100644 index 000000000000..ea0b9f87e173 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionCache.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import java.io.IOException; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +/** + * Test whether moved region cache is correct + */ +@Category({ MiscTests.class, MediumTests.class }) +public class TestMovedRegionCache { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMovedRegionCache.class); + + @Rule + public TestName name = new TestName(); + + private HBaseTestingUtility UTIL; + private MiniZooKeeperCluster zkCluster; + private HRegionServer source; + private HRegionServer dest; + private RegionInfo movedRegionInfo; + + @Before + public void setup() throws Exception { + UTIL = new HBaseTestingUtility(); + zkCluster = UTIL.startMiniZKCluster(); + StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(2).build(); + MiniHBaseCluster cluster = UTIL.startMiniHBaseCluster(option); + source = cluster.getRegionServer(0); + dest = cluster.getRegionServer(1); + assertEquals(2, cluster.getRegionServerThreads().size()); + TableName tableName = TableName.valueOf(name.getMethodName()); + UTIL.createTable(tableName, Bytes.toBytes("cf")); + UTIL.waitTableAvailable(tableName, 30_000); + movedRegionInfo = Iterables.getOnlyElement(cluster.getRegions(tableName)).getRegionInfo(); + UTIL.getAdmin().move(movedRegionInfo.getEncodedNameAsBytes(), source.getServerName()); + UTIL.waitFor(2000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return source.getOnlineRegion(movedRegionInfo.getRegionName()) != null; + } + }); + } + + @After + public void after() throws Exception { + UTIL.shutdownMiniCluster(); + if (zkCluster != null) { + zkCluster.shutdown(); + } + } + + @Test + public void testMovedRegionsCache() throws IOException, InterruptedException { + UTIL.getAdmin().move(movedRegionInfo.getEncodedNameAsBytes(), dest.getServerName()); + UTIL.waitFor(2000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return dest.getOnlineRegion(movedRegionInfo.getRegionName()) != null; + } + }); + assertNotNull("Moved region NOT in the cache!", source.getMovedRegion( + movedRegionInfo.getEncodedName())); + Thread.sleep(source.movedRegionCacheExpiredTime()); + assertNull("Expired moved region exist in the cache!", source.getMovedRegion( + movedRegionInfo.getEncodedName())); + } +} diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java deleted file mode 100644 index 8932646ef9b9..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test whether background cleanup of MovedRegion entries is happening - */ -@Category({ MiscTests.class, MediumTests.class }) -public class TestMovedRegionsCleaner { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMovedRegionsCleaner.class); - - private final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - - public static int numCalls = 0; - - private static class TestMockRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer { - - public TestMockRegionServer(Configuration conf) throws IOException, InterruptedException { - super(conf); - } - - @Override - protected int movedRegionCleanerPeriod() { - return 500; - } - - @Override protected void cleanMovedRegions() { - // count the number of calls that are being made to this - // - numCalls++; - super.cleanMovedRegions(); - } - } - - @After public void after() throws Exception { - UTIL.shutdownMiniCluster(); - } - - @Before public void before() throws Exception { - UTIL.getConfiguration() - .setStrings(HConstants.REGION_SERVER_IMPL, TestMockRegionServer.class.getName()); - UTIL.startMiniCluster(1); - } - - /** - * Start the cluster, wait for some time and verify that the background - * MovedRegion cleaner indeed gets called - * - * @throws IOException - * @throws InterruptedException - */ - @Test public void testMovedRegionsCleaner() throws IOException, InterruptedException { - // We need to sleep long enough to trigger at least one round of background calls - // to MovedRegionCleaner happen. Currently the period is set to 500ms. 
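The replacement test above has to Thread.sleep() for the whole expiry window before asserting the entry is gone. Should that ever prove slow or flaky, Guava caches can also be driven by an injected Ticker so that expiry is tested against a fake clock; a standalone sketch of that technique (not something this patch does):

  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.atomic.AtomicLong;
  import com.google.common.base.Ticker;
  import com.google.common.cache.Cache;
  import com.google.common.cache.CacheBuilder;

  final class FakeTickerSketch {
    public static void main(String[] args) {
      AtomicLong nanos = new AtomicLong();
      Ticker fakeTicker = new Ticker() {
        @Override public long read() { return nanos.get(); }
      };
      Cache<String, String> cache = CacheBuilder.newBuilder()
          .expireAfterWrite(100, TimeUnit.MILLISECONDS)
          .ticker(fakeTicker)
          .build();
      cache.put("k", "v");
      nanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(101)); // advance the fake clock past expiry
      System.out.println(cache.getIfPresent("k")); // prints null: the entry has expired
    }
  }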
- // Setting the sleep here for 2s just to be safe - // - UTIL.waitFor(2000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws IOException { - - // verify that there was at least one call to the cleanMovedRegions function - // - return numCalls > 0; - } - }); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java index de078618f829..95ce746ec5f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java @@ -78,13 +78,6 @@ private void testIfChoreScheduled(E choreObj) { @Test public void testDefaultScheduledChores() throws Exception { - // test if movedRegionsCleaner chore is scheduled by default in HRegionServer init - TestChoreField movedRegionsCleanerTestChoreField = - new TestChoreField<>(); - HRegionServer.MovedRegionsCleaner movedRegionsCleaner = movedRegionsCleanerTestChoreField - .getChoreObj("movedRegionsCleaner"); - movedRegionsCleanerTestChoreField.testIfChoreScheduled(movedRegionsCleaner); - // test if compactedHFilesDischarger chore is scheduled by default in HRegionServer init TestChoreField compactedHFilesDischargerTestChoreField = new TestChoreField<>(); From 2d2e7895bd044003f64254d0de6a368abfc9a50c Mon Sep 17 00:00:00 2001 From: KevinSmile Date: Sat, 19 Sep 2020 19:45:10 +0530 Subject: [PATCH 366/769] HBASE-24481 REST - Fix incorrect response code of get-regions in rest api Closes #2425 Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/rest/RegionsResource.java | 3 +++ .../org/apache/hadoop/hbase/rest/TestTableResource.java | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index c3eed6a2eecd..6d6293fb1647 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -75,6 +75,9 @@ public Response get(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); try { TableName tableName = TableName.valueOf(tableResource.getName()); + if (!tableResource.exists()) { + throw new TableNotFoundException(tableName); + } TableInfoModel model = new TableInfoModel(tableName.getNameAsString()); List locs; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index 0bece66df380..0c83a7fdeabc 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -261,5 +261,14 @@ public void testTableInfoPB() throws IOException, JAXBException { checkTableInfo(model); } + @Test + public void testTableNotFound() throws IOException { + String notExistTable = "notexist"; + Response response1 = client.get("/" + notExistTable + "/schema", Constants.MIMETYPE_JSON); + assertEquals(404, response1.getCode()); + Response response2 = client.get("/" + notExistTable + "/regions", Constants.MIMETYPE_XML); + assertEquals(404, response2.getCode()); + } + } From 19d1ef956d55b632f47af8e4537a2759e9c19cbc Mon Sep 17 00:00:00 2001 From: Mohammad Arshad Date: Sun, 20 Sep 2020 06:30:28 +0530 
Subject: [PATCH 367/769] HBASE-25069: Display region name instead of encoded region name in HBCK report page. (#2428) Signed-off-by: Guanghao Zhang --- .../main/resources/hbase-webapps/master/hbck.jsp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp index 12757c6ad5ee..69b95e1a118e 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp @@ -247,8 +247,8 @@ <% for (Pair p : report.getHoles()) { %> - <%= p.getFirst().getEncodedName() %> - <%= p.getSecond().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> + <%= p.getSecond().getRegionNameAsString() %> <% } %> @@ -275,14 +275,14 @@ <% for (Pair p : report.getOverlaps()) { %> <% if (report.getMergedRegions().containsKey(p.getFirst())) { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <% } else { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <% } %> <% if (report.getMergedRegions().containsKey(p.getSecond())) { %> - <%= p.getSecond().getEncodedName() %> + <%= p.getSecond().getRegionNameAsString() %> <% } else { %> - <%= p.getSecond().getEncodedName() %> + <%= p.getSecond().getRegionNameAsString() %> <% } %> <% } %> @@ -318,7 +318,7 @@ <% for (Pair p: report.getUnknownServers()) { %> - <%= p.getFirst().getEncodedName() %> + <%= p.getFirst().getRegionNameAsString() %> <%= p.getSecond() %> <% } %> From dff566e8337fd8fb45bb972be124ab6e971a8722 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 20 Sep 2020 11:25:22 +0800 Subject: [PATCH 368/769] HBASE-25066 Use FutureUtils.rethrow in AsyncTableResultScanner to better catch the stack trace (#2420) Signed-off-by: Guanghao Zhang --- .../hbase/client/AsyncTableResultScanner.java | 9 +++--- .../apache/hadoop/hbase/util/FutureUtils.java | 32 +++++++++++-------- .../hadoop/hbase/util/TestFutureUtils.java | 2 +- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java index cd5d5adb290a..7fe6d120c3f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java @@ -23,13 +23,13 @@ import java.io.InterruptedIOException; import java.util.ArrayDeque; import java.util.Queue; - +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.metrics.ScanMetrics; + import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; /** * The {@link ResultScanner} implementation for {@link AsyncTable}. 
It will fetch data automatically @@ -140,8 +140,7 @@ public synchronized Result next() throws IOException { return null; } if (error != null) { - Throwables.propagateIfPossible(error, IOException.class); - throw new IOException(error); + FutureUtils.rethrow(error); } try { wait(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java index dfd9ead27854..67a7d84b26fe 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java @@ -139,19 +139,23 @@ private static void setStackTrace(Throwable error) { error.setStackTrace(newStackTrace); } - private static IOException rethrow(ExecutionException error) throws IOException { - Throwable cause = error.getCause(); - if (cause instanceof IOException) { - setStackTrace(cause); - throw (IOException) cause; - } else if (cause instanceof RuntimeException) { - setStackTrace(cause); - throw (RuntimeException) cause; - } else if (cause instanceof Error) { - setStackTrace(cause); - throw (Error) cause; + /** + * If we could propagate the given {@code error} directly, we will fill the stack trace with the + * current thread's stack trace so it is easier to trace where is the exception thrown. If not, we + * will just create a new IOException and then throw it. + */ + public static IOException rethrow(Throwable error) throws IOException { + if (error instanceof IOException) { + setStackTrace(error); + throw (IOException) error; + } else if (error instanceof RuntimeException) { + setStackTrace(error); + throw (RuntimeException) error; + } else if (error instanceof Error) { + setStackTrace(error); + throw (Error) error; } else { - throw new IOException(cause); + throw new IOException(error); } } @@ -165,7 +169,7 @@ public static T get(Future future) throws IOException { } catch (InterruptedException e) { throw (IOException) new InterruptedIOException().initCause(e); } catch (ExecutionException e) { - throw rethrow(e); + throw rethrow(e.getCause()); } } @@ -179,7 +183,7 @@ public static T get(Future future, long timeout, TimeUnit unit) throws IO } catch (InterruptedException e) { throw (IOException) new InterruptedIOException().initCause(e); } catch (ExecutionException e) { - throw rethrow(e); + throw rethrow(e.getCause()); } catch (TimeoutException e) { throw new TimeoutIOException(e); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestFutureUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestFutureUtils.java index 0eef0a6fc51d..f09d94739724 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestFutureUtils.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestFutureUtils.java @@ -19,8 +19,8 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; From 9a2a65de9f696828e7e5d58c790405ed209fc96f Mon Sep 17 00:00:00 2001 From: stack Date: Sun, 20 Sep 2020 14:15:34 +0530 Subject: [PATCH 369/769] HBASE-24896 'Stuck' in static initialization creating RegionInfo instance Closes #2422 Untangle RegionInfo, RegionInfoBuilder, and MutableRegionInfo static initializations some. 
Move MutableRegionInfo from inner-class of RegionInfoBuilder to be (package private) standalone. Undo static initializing references from RI to RIB. Co-authored-by: Nick Dimiduk Signed-off-by: Bharath Vissapragada Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../hadoop/hbase/client/RegionInfo.java | 9 ++- .../TestRegionInfoStaticInitialization.java | 70 +++++++++++++++++++ 2 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 7a3a9af227f7..493b389b72aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -71,7 +71,9 @@ public interface RegionInfo extends Comparable { */ @Deprecated @InterfaceAudience.Private - RegionInfo UNDEFINED = RegionInfoBuilder.newBuilder(TableName.valueOf("__UNDEFINED__")).build(); + // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24627 + RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), + RegionInfo.DEFAULT_REPLICA_ID); /** * Separator used to demarcate the encodedName in a region name @@ -588,8 +590,9 @@ static String prettyPrint(final String encodedRegionName) { * @return the MOB {@link RegionInfo}. */ static RegionInfo createMobRegionInfo(TableName tableName) { - return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")). - setRegionId(0).build(); + // Skipping reference to RegionInfoBuilder in this class. + return new MutableRegionInfo(tableName, Bytes.toBytes(".mob"), + HConstants.EMPTY_END_ROW, false, 0, DEFAULT_REPLICA_ID, false); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java new file mode 100644 index 000000000000..48729faae3ef --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Stream; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test for the tangled mess that is static initialization of our our {@link RegionInfo} and + * {@link RegionInfoBuilder}, as reported on HBASE-24896. The condition being tested can only be + * reproduced the first time a JVM loads the classes under test. Thus, this test is marked as a + * {@link LargeTests} because, under their current configuration, tests in that category are run + * in their own JVM instances. + */ +@SuppressWarnings("deprecation") +@Category({ RegionServerTests.class, LargeTests.class}) +public class TestRegionInfoStaticInitialization { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionInfoStaticInitialization.class); + + @Test + public void testParallelStaticInitialization() throws Exception { + // The JVM loads symbols lazily. These suppliers reference two symbols that, before this patch, + // are mutually dependent and expose a deadlock in the loading of symbols from RegionInfo and + // RegionInfoBuilder. + final Supplier retrieveUNDEFINED = () -> RegionInfo.UNDEFINED; + final Supplier retrieveMetaRegionInfo = + () -> RegionInfoBuilder.FIRST_META_REGIONINFO; + + // The test runs multiple threads that reference these mutually dependent symbols. In order to + // express this bug, these threads need to access these symbols at roughly the same time, so + // that the classloader is asked to materialize these symbols concurrently. These Suppliers are + // run on threads that have already been allocated, managed by the system's ForkJoin pool. + final CompletableFuture[] futures = Stream.of( + retrieveUNDEFINED, retrieveMetaRegionInfo, retrieveUNDEFINED, retrieveMetaRegionInfo) + .map(CompletableFuture::supplyAsync) + .toArray(CompletableFuture[]::new); + + // Loading classes should be relatively fast. 5 seconds is an arbitrary choice of timeout. It + // was chosen under the assumption that loading these symbols should complete much faster than + // this window. 
+ CompletableFuture.allOf(futures).get(5, TimeUnit.SECONDS); + } +} From b378822b7f3e9151fc5cd6d713cb1edc7b25633b Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sun, 20 Sep 2020 14:31:22 +0530 Subject: [PATCH 370/769] HBASE-24896 : Jira number correction in comment (ADDENDUM) --- .../main/java/org/apache/hadoop/hbase/client/RegionInfo.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 493b389b72aa..d860c7681a37 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -71,7 +71,7 @@ public interface RegionInfo extends Comparable<RegionInfo> { */ @Deprecated @InterfaceAudience.Private - // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24627 + // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24896 RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"), RegionInfo.DEFAULT_REPLICA_ID); From d9cb47e51310e39880e0185863cebd09659353ea Mon Sep 17 00:00:00 2001 From: niuyulin Date: Mon, 21 Sep 2020 02:08:43 +0800 Subject: [PATCH 371/769] HBASE-25072 Remove the unnecessary System.out.println in MasterRegistry (#2429) Co-authored-by: niuyulin Signed-off-by: Jan Hentschel Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/client/MasterRegistry.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index 2a7ae16df47a..06582684c79c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -325,7 +325,6 @@ private static List<ServerName> transformServerNames(GetMastersResponse resp) { } CompletableFuture<List<ServerName>> getMasters() { - System.out.println("getMasters()"); return this .
call((c, s, d) -> s.getMasters( c, GetMastersRequest.getDefaultInstance(), d), r -> r.getMasterServersCount() != 0, @@ -346,4 +345,4 @@ public void close() { rpcClient.close(); } } -} \ No newline at end of file +} From fdf9a5480578b9e0bdb5c0ac525d6eded7a0c15d Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Mon, 21 Sep 2020 08:25:58 +0800 Subject: [PATCH 372/769] HBASE-24976 Printing the swallowed exception Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani Signed-off-by: Guanghao Zhang --- .../java/org/apache/hadoop/hbase/rest/RESTServer.java | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 2ad57e1b742c..c6f769ee6054 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -382,13 +382,8 @@ public synchronized void run() throws Exception { this.infoServer.setAttribute("hbase.conf", conf); this.infoServer.start(); } - try { - // start server - server.start(); - } catch (Exception e) { - LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); - throw e; - } + // start server + server.start(); } public synchronized void join() throws Exception { @@ -442,6 +437,7 @@ public static void main(String[] args) throws Exception { server.run(); server.join(); } catch (Exception e) { + LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); System.exit(1); } From 92662fc1697d43ef9e3049357722dc02f37b233a Mon Sep 17 00:00:00 2001 From: Guanghao Zhang Date: Mon, 21 Sep 2020 08:27:38 +0800 Subject: [PATCH 373/769] Revert "HBASE-24976 Printing the swallowed exception" This reverts commit 8efd2509e9f4d44a699f0ad0e50c0b7ca7ad2b2a. 
--- .../java/org/apache/hadoop/hbase/rest/RESTServer.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index c6f769ee6054..2ad57e1b742c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -382,8 +382,13 @@ public synchronized void run() throws Exception { this.infoServer.setAttribute("hbase.conf", conf); this.infoServer.start(); } - // start server - server.start(); + try { + // start server + server.start(); + } catch (Exception e) { + LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); + throw e; + } } public synchronized void join() throws Exception { @@ -437,7 +442,6 @@ public static void main(String[] args) throws Exception { server.run(); server.join(); } catch (Exception e) { - LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); System.exit(1); } From 830bf13c544facc4427c64d53534b8165a9be1b8 Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Mon, 21 Sep 2020 08:25:58 +0800 Subject: [PATCH 374/769] HBASE-24976 REST Server failes to start without any error message Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani Signed-off-by: Guanghao Zhang --- .../java/org/apache/hadoop/hbase/rest/RESTServer.java | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 2ad57e1b742c..c6f769ee6054 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -382,13 +382,8 @@ public synchronized void run() throws Exception { this.infoServer.setAttribute("hbase.conf", conf); this.infoServer.start(); } - try { - // start server - server.start(); - } catch (Exception e) { - LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); - throw e; - } + // start server + server.start(); } public synchronized void join() throws Exception { @@ -442,6 +437,7 @@ public static void main(String[] args) throws Exception { server.run(); server.join(); } catch (Exception e) { + LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); System.exit(1); } From 31ed6c3c189dba1ab46bb34890e022ccadf9d1ae Mon Sep 17 00:00:00 2001 From: niuyulin Date: Tue, 22 Sep 2020 00:44:47 +0800 Subject: [PATCH 375/769] HBASE-25075 Fix typo in ReplicationProtobufUtil (#2431) Co-authored-by: niuyulin Signed-off-by: Jan Hentschel Signed-off-by: Guanghao Zhang Signed-off-by: Duo Zhang --- .../client/AsyncRegionReplicaReplayRetryingCaller.java | 4 ++-- ...icationProtbufUtil.java => ReplicationProtobufUtil.java} | 2 +- .../regionserver/HBaseInterClusterReplicationEndpoint.java | 4 ++-- .../regionserver/ReplaySyncReplicationWALCallable.java | 4 ++-- .../hadoop/hbase/protobuf/TestReplicationProtobuf.java | 2 +- .../hadoop/hbase/replication/SyncReplicationTestBase.java | 6 +++--- 6 files changed, 11 insertions(+), 11 deletions(-) rename hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/{ReplicationProtbufUtil.java => ReplicationProtobufUtil.java} (99%) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java 
index 91d950265db8..0146c8b94d2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -104,7 +104,7 @@ private void call(HRegionLocation loc) { err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } - Pair p = ReplicationProtbufUtil + Pair p = ReplicationProtobufUtil .buildReplicateWALEntryRequest(entries, encodedRegionName, null, null, null); resetCallTimeout(); controller.setCellScanner(p.getSecond()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java similarity index 99% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java index 4e2e5779303f..e47c92914f0d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; @InterfaceAudience.Private -public class ReplicationProtbufUtil { +public class ReplicationProtobufUtil { /** * A helper to replicate a list of WAL entries using region server admin diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 816345f629d3..4e0669c2e9fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -56,7 +56,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; @@ -632,7 +632,7 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) sinkPeer = replicationSinkMgr.getReplicationSink(); AsyncRegionServerAdmin rsAdmin = sinkPeer.getRegionServer(); try { - ReplicationProtbufUtil.replicateWALEntry(rsAdmin, + ReplicationProtobufUtil.replicateWALEntry(rsAdmin, entries.toArray(new Entry[entries.size()]), replicationClusterId, baseNamespaceDir, hfileArchiveDir, timeout); if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index 907faba3e404..e03bbe2b1c65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.util.KeyLocker; @@ -123,7 +123,7 @@ private void replayWAL(String wal) throws IOException { try (Reader reader = getReader(wal)) { List entries = readWALEntries(reader); while (!entries.isEmpty()) { - Pair pair = ReplicationProtbufUtil + Pair pair = ReplicationProtobufUtil .buildReplicateWALEntryRequest(entries.toArray(new Entry[entries.size()])); ReplicateWALEntryRequest request = pair.getFirst(); rs.getReplicationSinkService().replicateLogEntries(request.getEntryList(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java index 7b0e6cbdd8f1..615fa6445227 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java @@ -63,7 +63,7 @@ public void testGetCellScanner() throws IOException { all.add(a); all.add(b); all.add(c); - CellScanner scanner = ReplicationProtbufUtil.getCellScanner(all, 0); + CellScanner scanner = ReplicationProtobufUtil.getCellScanner(all, 0); testAdvancetHasSameRow(scanner, akv); // Skip over aa scanner.advance(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java index f11bd498bb1c..23753e211054 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.MasterFileSystem; -import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; @@ -266,12 +266,12 @@ protected final void verifyReplicationRequestRejection(HBaseTestingUtility utili new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit()); } if (!expectedRejection) { - ReplicationProtbufUtil.replicateWALEntry( + ReplicationProtobufUtil.replicateWALEntry( connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null, HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT); } else { try { - ReplicationProtbufUtil.replicateWALEntry( + ReplicationProtobufUtil.replicateWALEntry( 
connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null, HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT); fail("Should throw IOException when sync-replication state is in A or DA"); From dd8d76c34326c81f758b15215a3099684f9e8665 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Tue, 22 Sep 2020 01:00:49 +0800 Subject: [PATCH 376/769] HBASE-25076 fix typo in MasterRegion java doc (#2432) Co-authored-by: niuyulin Signed-off-by: Jan Hentschel Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/master/region/MasterRegion.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java index aa1b9d1257ea..81da59d6b665 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java @@ -79,9 +79,9 @@ * Notice that, you can use different root file system and WAL file system. Then the above directory * will be on two file systems, the root file system will have the data directory while the WAL * filesystem will have the WALs directory. The archived HFile will be moved to the global HFile - * archived directory with the {@link MasterRegionParams#archivedWalSuffix()} suffix. The archived + * archived directory with the {@link MasterRegionParams#archivedHFileSuffix()} suffix. The archived * WAL will be moved to the global WAL archived directory with the - * {@link MasterRegionParams#archivedHFileSuffix()} suffix. + * {@link MasterRegionParams#archivedWalSuffix()} suffix. */ @InterfaceAudience.Private public final class MasterRegion { From 43cac715c670f7fcdea63738a3b352901a64e418 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 22 Sep 2020 17:53:15 +0200 Subject: [PATCH 377/769] HBASE-25081 Up the container nproc uplimit to 30000 (#2439) --- dev-support/hbase-personality.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index d7ca64cbb742..6f1355cf31a1 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -80,9 +80,9 @@ function personality_globals # TODO use PATCH_BRANCH to select jdk versions to use. # Yetus 0.7.0 enforces limits. Default proclimit is 1000. - # Up it. See HBASE-19902 for how we arrived at this number. + # Up it. See HBASE-25081 for how we arrived at this number. #shellcheck disable=SC2034 - PROC_LIMIT=12500 + PROC_LIMIT=30000 # Set docker container to run with 20g. Default is 4g in yetus. # See HBASE-19902 for how we arrived at 20g. From ebc0fddde564d4a1c9e18eefcd6d381b6d93e5ce Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Tue, 22 Sep 2020 10:10:31 -0700 Subject: [PATCH 378/769] HBASE-25067 Edit of log messages around async WAL Replication; checkstyle fixes; and a bugfix (#2435) Editing logging around region replicas: shortening and adding context. Checkstyle fixes in edited files while I was in there. 
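The logging edits in this patch repeatedly replace string concatenation (often wrapped in an isDebugEnabled() guard) with SLF4J's parameterized form. A small illustrative sketch of the difference; names are placeholders, not the patched HBase methods:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogStyleSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LogStyleSketch.class);

      void onClose(String regionName, long backoffMs) {
        // Old style: the message string is built eagerly, so a guard is needed.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Received CLOSE for the region: " + regionName + ", try again after "
              + backoffMs + "ms");
        }
        // New style: formatting is deferred until DEBUG is actually enabled.
        // (Argument evaluation still happens, so guards stay useful for costly arguments.)
        LOG.debug("Received CLOSE for {}; try again after {}ms", regionName, backoffMs);
      }
    }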
Signed-off-by: Duo Zhang --- .../replication/ReplicationPeerImpl.java | 4 +- .../apache/hadoop/hbase/master/HMaster.java | 2 +- .../procedure/EnableTableProcedure.java | 4 +- .../hadoop/hbase/regionserver/HRegion.java | 23 +++++----- .../hbase/regionserver/HRegionServer.java | 13 +++--- .../handler/AssignRegionHandler.java | 11 +++-- .../handler/RegionReplicaFlushHandler.java | 31 ++++++------- .../handler/UnassignRegionHandler.java | 14 +++--- .../regionserver/wal/ProtobufLogReader.java | 4 +- .../regionserver/ReplicationSource.java | 43 +++++++++---------- .../regionserver/WALEntryStream.java | 12 +++--- .../hbase/zookeeper/MetaTableLocator.java | 6 +-- 12 files changed, 82 insertions(+), 85 deletions(-) diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java index 22026e5284ce..08799856b754 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig SyncReplicationState newSyncReplicationState) { this.conf = conf; this.id = id; - this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED; + setPeerState(peerState); this.peerConfig = peerConfig; this.syncReplicationStateBits = syncReplicationState.value() | (newSyncReplicationState.value() << SHIFT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index c87f144fc876..e4bd3c5fce22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1787,7 +1787,7 @@ public boolean balance(boolean force) throws IOException { toPrint = regionsInTransition.subList(0, max); truncated = true; } - LOG.info(prefix + "unning balancer because " + regionsInTransition.size() + + LOG.info(prefix + " not running balancer because " + regionsInTransition.size() + " region(s) in transition: " + toPrint + (truncated? "(truncated list)": "")); if (!force || metaInTransition) return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index 8ad3ae6d33c6..6ca83fe01efe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -142,9 +142,9 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS } } else { // the replicasFound is less than the regionReplication - LOG.info("Number of replicas has increased. Assigning new region replicas." + + LOG.info("Number of replicas has increased for {}. Assigning new region replicas." + "The previous replica count was {}. 
The current replica count is {}.", - (currentMaxReplica + 1), configuredReplicaCount); + this.tableName, (currentMaxReplica + 1), configuredReplicaCount); regionsOfTable = RegionReplicaUtil.addReplicas(regionsOfTable, currentMaxReplica + 1, configuredReplicaCount); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index a09151564356..a208d9330042 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -2438,11 +2438,13 @@ public FlushResultImpl flushcache(List families, status.setStatus("Acquiring readlock on region"); // block waiting for the lock for flushing cache lock.readLock().lock(); + boolean flushed = true; try { if (this.closed.get()) { String msg = "Skipping flush on " + this + " because closed"; LOG.debug(msg); status.abort(msg); + flushed = false; return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } if (coprocessorHost != null) { @@ -2459,15 +2461,11 @@ public FlushResultImpl flushcache(List families, if (!writestate.flushing && writestate.writesEnabled) { this.writestate.flushing = true; } else { - if (LOG.isDebugEnabled()) { - LOG.debug("NOT flushing memstore for region " + this - + ", flushing=" + writestate.flushing + ", writesEnabled=" - + writestate.writesEnabled); - } - String msg = "Not flushing since " - + (writestate.flushing ? "already flushing" - : "writes not enabled"); + String msg = "NOT flushing " + this + " as " + (writestate.flushing ? "already flushing" + : "writes are not enabled"); + LOG.debug(msg); status.abort(msg); + flushed = false; return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } } @@ -2505,8 +2503,11 @@ public FlushResultImpl flushcache(List families, } } finally { lock.readLock().unlock(); - LOG.debug("Flush status journal for {}:\n{}", this.getRegionInfo().getEncodedName(), - status.prettyPrintJournal()); + if (flushed) { + // Don't log this journal stuff if no flush -- confusing. + LOG.debug("Flush status journal for {}:\n{}", this.getRegionInfo().getEncodedName(), + status.prettyPrintJournal()); + } status.cleanup(); } } @@ -5032,7 +5033,7 @@ protected void checkReadsEnabled() throws IOException { public void setReadsEnabled(boolean readsEnabled) { if (readsEnabled && !this.writestate.readsEnabled) { - LOG.info(getRegionInfo().getEncodedName() + " : Enabling reads for region."); + LOG.info("Enabling reads for {}", getRegionInfo().getEncodedName()); } this.writestate.setReadsEnabled(readsEnabled); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index c33be53d4538..f14da2f6a17e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -123,6 +123,8 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.mob.MobFileCache; +import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.namequeues.SlowLogTableOpsChore; import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore; @@ -139,8 +141,6 @@ import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler; import org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler; import org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler; -import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; -import org.apache.hadoop.hbase.namequeues.SlowLogTableOpsChore; import org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; @@ -2466,10 +2466,13 @@ private void triggerFlushInPrimaryRegion(final HRegion region) { region.setReadsEnabled(false); // disable reads before marking the region as opened. // RegionReplicaFlushHandler might reset this. - // submit it to be handled by one of the handlers so that we do not block OpenRegionHandler + // Submit it to be handled by one of the handlers so that we do not block OpenRegionHandler if (this.executorService != null) { this.executorService.submit(new RegionReplicaFlushHandler(this, region)); - } + } else { + LOG.info("Executor is null; not running flush of primary region replica for {}", + region.getRegionInfo()); + } } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java index 737f1653bc94..98d09b20e879 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java @@ -92,7 +92,7 @@ public void process() throws IOException { String regionName = regionInfo.getRegionNameAsString(); Region onlineRegion = rs.getRegion(encodedName); if (onlineRegion != null) { - LOG.warn("Received OPEN for the region:{}, which is already online", regionName); + LOG.warn("Received OPEN for {} which is already online", regionName); // Just follow the old behavior, do we need to call reportRegionStateTransition? Maybe not? // For normal case, it could happen that the rpc call to schedule this handler is succeeded, // but before returning to master the connection is broken. And when master tries again, we @@ -104,7 +104,7 @@ public void process() throws IOException { if (previous != null) { if (previous) { // The region is opening and this maybe a retry on the rpc call, it is safe to ignore it. - LOG.info("Receiving OPEN for the region:{}, which we are already trying to OPEN" + + LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + " - ignoring this new request for this region.", regionName); } else { // The region is closing. This is possible as we will update the region state to CLOSED when @@ -113,7 +113,7 @@ public void process() throws IOException { // closing process. 
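// Note: delayedSubmit() below re-queues this same handler after the backoff rather
// than sleeping, so no executor thread is blocked while the close completes.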
long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); LOG.info( - "Receiving OPEN for the region:{}, which we are trying to close, try again after {}ms", + "Receiving OPEN for {} which we are trying to close, try again after {}ms", regionName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } @@ -145,11 +145,10 @@ public void process() throws IOException { Boolean current = rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes()); if (current == null) { // Should NEVER happen, but let's be paranoid. - LOG.error("Bad state: we've just opened a region that was NOT in transition. Region={}", - regionName); + LOG.error("Bad state: we've just opened {} which was NOT in transition", regionName); } else if (!current) { // Should NEVER happen, but let's be paranoid. - LOG.error("Bad state: we've just opened a region that was closing. Region={}", regionName); + LOG.error("Bad state: we've just opened {} which was closing", regionName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java index cc798cc2443f..dddf5532442c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,9 +39,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; /** - * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to wal in + * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to WAL in * secondary region replicas. This means that a secondary region replica can serve some edits from - * it's memstore that that is still not flushed from primary. We do not want to allow secondary + * it's memstore that are still not flushed from primary. We do not want to allow secondary * region's seqId to go back in time, when this secondary region is opened elsewhere after a * crash or region move. We will trigger a flush cache in the primary region replica and wait * for observing a complete flush cycle before marking the region readsEnabled. This handler does @@ -50,7 +50,6 @@ */ @InterfaceAudience.Private public class RegionReplicaFlushHandler extends EventHandler { - private static final Logger LOG = LoggerFactory.getLogger(RegionReplicaFlushHandler.class); private final AsyncClusterConnection connection; @@ -73,7 +72,7 @@ protected void handleException(Throwable t) { if (t instanceof InterruptedIOException || t instanceof InterruptedException) { LOG.error("Caught throwable while processing event " + eventType, t); } else if (t instanceof RuntimeException) { - server.abort("ServerAborting because a runtime exception was thrown", t); + server.abort("Server aborting", t); } else { // something fishy since we cannot flush the primary region until all retries (retries from // rpc times 35 trigger). 
We cannot close the region since there is no such mechanism to @@ -101,9 +100,9 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { RetryCounter counter = new RetryCounterFactory(maxAttempts, (int)pause).create(); if (LOG.isDebugEnabled()) { - LOG.debug("Attempting to do an RPC to the primary region replica " + ServerRegionReplicaUtil - .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " - + region.getRegionInfo().getEncodedName() + " to trigger a flush"); + LOG.debug("RPC'ing to primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + " from " + + region.getRegionInfo() + " to trigger FLUSH"); } while (!region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped()) { @@ -142,11 +141,11 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { // then we have to wait for seeing the flush entry. All reads will be rejected until we see // a complete flush cycle or replay a region open event if (LOG.isDebugEnabled()) { - LOG.debug("Successfully triggered a flush of primary region replica " + + LOG.debug("Triggered flush of primary region replica " + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) .getRegionNameAsString() + - " of region " + region.getRegionInfo().getRegionNameAsString() + - " Now waiting and blocking reads until observing a full flush cycle"); + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until completes a full flush cycle"); } region.setReadsEnabled(true); break; @@ -154,12 +153,10 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { if (response.hasWroteFlushWalMarker()) { if (response.getWroteFlushWalMarker()) { if (LOG.isDebugEnabled()) { - LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary " + - "region replica " + - ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) - .getRegionNameAsString() + - " of region " + region.getRegionInfo().getRegionNameAsString() + - " Now waiting and " + "blocking reads until observing a flush marker"); + LOG.debug("Triggered empty flush marker (memstore empty) on primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()). + getRegionNameAsString() + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until observing a flush marker"); } region.setReadsEnabled(true); break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java index 8b275d0e6ed1..0bf2543a445a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -84,19 +84,18 @@ public void process() throws IOException { // reportRegionStateTransition, so the HMaster will think the region is online, before we // actually open the region, as reportRegionStateTransition is part of the opening process. 
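// Illustrative sketch, standalone and not part of this patch (field names are made
// up, not RetryCounter's internals): the delay fetched on the next line grows with
// each attempt, typically exponentially with a cap, so a CLOSE racing an in-flight
// OPEN backs off instead of spinning.
final class BackoffSketch {
  private int attempts = 0;

  long nextBackoffMs(long basePauseMs, long maxPauseMs) {
    long backoff = basePauseMs * (1L << Math.min(attempts++, 16)); // exponential growth, shift capped
    return Math.min(backoff, maxPauseMs); // never exceed the configured maximum
  }
}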
long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Received CLOSE for the region: {}, which we are already " + - "trying to OPEN. try again after {}ms", encodedName, backoff); + LOG.warn("Received CLOSE for {} which we are already " + + "trying to OPEN; try again after {}ms", encodedName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } else { - LOG.info("Received CLOSE for the region: {}, which we are already trying to CLOSE," + + LOG.info("Received CLOSE for {} which we are already trying to CLOSE," + " but not completed yet", encodedName); } return; } HRegion region = rs.getRegion(encodedName); if (region == null) { - LOG.debug( - "Received CLOSE for a region {} which is not online, and we're not opening/closing.", + LOG.debug("Received CLOSE for {} which is not ONLINE and we're not opening/closing.", encodedName); rs.getRegionsInTransitionInRS().remove(encodedNameBytes, Boolean.FALSE); return; @@ -114,10 +113,11 @@ public void process() throws IOException { if (region.close(abort) == null) { // XXX: Is this still possible? The old comment says about split, but now split is done at // master side, so... - LOG.warn("Can't close region {}, was already closed during close()", regionName); + LOG.warn("Can't close {} already closed during close()", regionName); rs.getRegionsInTransitionInRS().remove(encodedNameBytes, Boolean.FALSE); return; } + rs.removeRegion(region, destination); if (!rs.reportRegionStateTransition( new RegionStateTransitionContext(TransitionCode.CLOSED, HConstants.NO_SEQNUM, closeProcId, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index 6f537df94900..0967c101ce58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -412,14 +412,14 @@ protected boolean readNext(Entry entry) throws IOException { + "because originalPosition is negative. last offset={}", this.inputStream.getPos(), eof); throw eof; } - // If stuck at the same place and we got and exception, lets go back at the beginning. + // If stuck at the same place and we got an exception, lets go back at the beginning. 
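// Note: "go back at the beginning" means seekOnFs(0) re-positions the reader at
// offset 0 of the same WAL so the file is parsed again from the start; it does not
// re-open the stream or move to a different file.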
if (inputStream.getPos() == originalPosition) { if (resetPosition) { LOG.warn("Encountered a malformed edit, seeking to the beginning of the WAL since " + "current position and original position match at {}", originalPosition); seekOnFs(0); } else { - LOG.debug("Reached the end of file at position {}", originalPosition); + LOG.debug("EOF at position {}", originalPosition); } } else { // Else restore our position to original location in hope that next time through we will diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index dc0276dc7075..cb9a14d46b28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -38,7 +38,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Predicate; - import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; @@ -249,34 +248,35 @@ public void enqueueLog(Path wal) { LOG.trace("NOT replicating {}", wal); return; } - String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getName()); - PriorityBlockingQueue queue = queues.get(logPrefix); + // Use WAL prefix as the WALGroupId for this peer. + String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getName()); + PriorityBlockingQueue queue = queues.get(walPrefix); if (queue == null) { queue = new PriorityBlockingQueue<>(queueSizePerGroup, new LogsComparator()); // make sure that we do not use an empty queue when setting up a ReplicationSource, otherwise // the shipper may quit immediately queue.put(wal); - queues.put(logPrefix, queue); + queues.put(walPrefix, queue); if (this.isSourceActive() && this.walEntryFilter != null) { // new wal group observed after source startup, start a new worker thread to track it // notice: it's possible that wal enqueued when this.running is set but worker thread // still not launched, so it's necessary to check workerThreads before start the worker - tryStartNewShipper(logPrefix, queue); + tryStartNewShipper(walPrefix, queue); } } else { queue.put(wal); } if (LOG.isTraceEnabled()) { - LOG.trace("{} Added wal {} to queue of source {}.", logPeerId(), logPrefix, + LOG.trace("{} Added wal {} to queue of source {}.", logPeerId(), walPrefix, this.replicationQueueInfo.getQueueId()); } this.metrics.incrSizeOfLogQueue(); // This will wal a warning for each new wal that gets created above the warn threshold int queueSize = queue.size(); if (queueSize > this.logQueueWarnThreshold) { - LOG.warn("{} WAL group {} queue size: {} exceeds value of " - + "replication.source.log.queue.warn: {}", logPeerId(), - logPrefix, queueSize, logQueueWarnThreshold); + LOG.warn("{} WAL group {} queue size: {} exceeds value of " + + "replication.source.log.queue.warn {}", logPeerId(), walPrefix, queueSize, + logQueueWarnThreshold); } } @@ -372,16 +372,10 @@ private void initializeWALEntryFilter(UUID peerClusterId) { private void tryStartNewShipper(String walGroupId, PriorityBlockingQueue queue) { workerThreads.compute(walGroupId, (key, value) -> { if (value != null) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "{} Someone has beat us to start a worker thread for wal group {}", - logPeerId(), key); - } + 
LOG.debug("{} preempted start of worker walGroupId={}", logPeerId(), walGroupId); return value; } else { - if (LOG.isDebugEnabled()) { - LOG.debug("{} Starting up worker for wal group {}", logPeerId(), key); - } + LOG.debug("{} starting worker for walGroupId={}", logPeerId(), walGroupId); ReplicationSourceShipper worker = createNewShipper(walGroupId, queue); ReplicationSourceWALReader walReader = createNewWALReader(walGroupId, queue, worker.getStartPosition()); @@ -457,8 +451,7 @@ private ReplicationSourceWALReader createNewWALReader(String walGroupId, /** * Call after {@link #initializeWALEntryFilter(UUID)} else it will be null. - * @return The WAL Entry Filter Chain this ReplicationSource will use on WAL files filtering - * out WALEntry edits. + * @return WAL Entry Filter Chain to use on WAL files filtering *out* WALEntry edits. */ @VisibleForTesting WALEntryFilter getWalEntryFilter() { @@ -610,7 +603,7 @@ private void initialize() { this.startupOngoing.set(false); throw new IllegalStateException("Source should be active."); } - LOG.info("{} Source: {}, is now replicating from cluster: {}; to peer cluster: {};", + LOG.info("{} queueId={} is replicating from cluster={} to cluster={}", logPeerId(), this.replicationQueueInfo.getQueueId(), clusterId, peerClusterId); initializeWALEntryFilter(peerClusterId); @@ -625,10 +618,13 @@ private void initialize() { @Override public void startup() { + if (this.sourceRunning) { + return; + } + this.sourceRunning = true; //Flag that signalizes uncaught error happening while starting up the source // and a retry should be attempted MutableBoolean retryStartup = new MutableBoolean(true); - this.sourceRunning = true; do { if(retryStartup.booleanValue()) { retryStartup.setValue(false); @@ -661,7 +657,8 @@ public void terminate(String reason, Exception cause, boolean clearMetrics) { terminate(reason, cause, clearMetrics, true); } - public void terminate(String reason, Exception cause, boolean clearMetrics, boolean join) { + public void terminate(String reason, Exception cause, boolean clearMetrics, + boolean join) { if (cause == null) { LOG.info("{} Closing source {} because: {}", logPeerId(), this.queueId, reason); } else { @@ -855,6 +852,6 @@ void removeWorker(ReplicationSourceShipper worker) { } private String logPeerId(){ - return "[Source for peer " + this.getPeer().getId() + "]:"; + return "peerId=" + this.getPeerId() + ","; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 0454e817b0e3..a95ee13c7379 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -174,7 +174,7 @@ private void setCurrentPath(Path path) { private void tryAdvanceEntry() throws IOException { if (checkReader()) { boolean beingWritten = readNextEntryAndRecordReaderPosition(); - LOG.trace("reading wal file {}. Current open for write: {}", this.currentPath, beingWritten); + LOG.trace("Reading WAL {}; currently open for write={}", this.currentPath, beingWritten); if (currentEntry == null && !beingWritten) { // no more entries in this log file, and the file is already closed, i.e, rolled // Before dequeueing, we should always get one more attempt at reading. 
@@ -222,7 +222,7 @@ private boolean checkAllBytesParsed() throws IOException { if (currentPositionOfReader < stat.getLen()) { final long skippedBytes = stat.getLen() - currentPositionOfReader; LOG.debug( - "Reached the end of WAL file '{}'. It was not closed cleanly," + + "Reached the end of WAL {}. It was not closed cleanly," + " so we did not parse {} bytes of data. This is normally ok.", currentPath, skippedBytes); metrics.incrUncleanlyClosedWALs(); @@ -230,7 +230,7 @@ private boolean checkAllBytesParsed() throws IOException { } } else if (currentPositionOfReader + trailerSize < stat.getLen()) { LOG.warn( - "Processing end of WAL file '{}'. At position {}, which is too far away from" + + "Processing end of WAL {} at position {}, which is too far away from" + " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}", currentPath, currentPositionOfReader, stat.getLen(), getCurrentPathStat()); setPosition(0); @@ -241,7 +241,7 @@ private boolean checkAllBytesParsed() throws IOException { } } if (LOG.isTraceEnabled()) { - LOG.trace("Reached the end of log " + this.currentPath + ", and the length of the file is " + + LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + (stat == null ? "N/A" : stat.getLen())); } metrics.incrCompletedWAL(); @@ -249,7 +249,7 @@ private boolean checkAllBytesParsed() throws IOException { } private void dequeueCurrentLog() throws IOException { - LOG.debug("Reached the end of log {}", currentPath); + LOG.debug("EOF, closing {}", currentPath); closeReader(); logQueue.remove(); setPosition(0); @@ -264,7 +264,7 @@ private boolean readNextEntryAndRecordReaderPosition() throws IOException { long readerPos = reader.getPosition(); OptionalLong fileLength = walFileLengthProvider.getLogFileSizeIfBeingWritten(currentPath); if (fileLength.isPresent() && readerPos > fileLength.getAsLong()) { - // see HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted + // See HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted // data, so we need to make sure that we do not read beyond the committed file length. if (LOG.isDebugEnabled()) { LOG.debug("The provider tells us the valid length for " + currentPath + " is " + diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index bb02af3788aa..557ba77c5236 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -221,7 +221,7 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, i LOG.warn("Tried to set null ServerName in hbase:meta; skipping -- ServerName required"); return; } - LOG.info("Setting hbase:meta (replicaId={}) location in ZooKeeper as {}, state={}", replicaId, + LOG.info("Setting hbase:meta replicaId={} location in ZooKeeper as {}, state={}", replicaId, serverName, state); // Make the MetaRegionServer pb and then get its bytes and save this as // the znode content. 
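The set-then-create fallback exercised in the hunk below is a common ZooKeeper idiom. A minimal, self-contained sketch against the plain ZooKeeper client (paths, ACLs and session handling are illustrative; the HBase code goes through ZKUtil):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class SetOrCreateSketch {
      // Write data to a znode, creating the node first if it does not exist yet.
      static void setOrCreate(ZooKeeper zk, String path, byte[] data)
          throws KeeperException, InterruptedException {
        try {
          zk.setData(path, data, -1); // version -1 matches any version
        } catch (KeeperException.NoNodeException e) {
          // A concurrent creator can still win this race; production code retries
          // or handles NodeExistsException as well.
          zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
      }
    }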
@@ -235,9 +235,9 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, i zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data); } catch(KeeperException.NoNodeException nne) { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { - LOG.debug("META region location doesn't exist, create it"); + LOG.debug("hbase:meta region location doesn't exist, create it"); } else { - LOG.debug("META region location doesn't exist for replicaId=" + replicaId + + LOG.debug("hbase:meta region location doesn't exist for replicaId=" + replicaId + ", create it"); } ZKUtil.createAndWatch(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), From 62ae26b2e393a46ef50e7edeffab7459c3465271 Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 18 Sep 2020 17:29:23 -0700 Subject: [PATCH 379/769] HBASE-25068 Pass WALFactory to Replication so it knows of all WALProviders, not just default/user-space Pass WALFactory to Replication instead of WALProvider. WALFactory has all WALProviders in it, not just the user-space WALProvider. Do this so ReplicationService has access to all WALProviders in the Server (To be exploited by the follow-on patch in HBASE-25055) --- .../hadoop/hbase/regionserver/HRegionServer.java | 15 +++++++-------- .../hbase/regionserver/ReplicationService.java | 11 ++++------- .../replication/regionserver/Replication.java | 8 ++++---- .../regionserver/ReplicationSyncUp.java | 6 ++++-- .../hbase/replication/TestReplicationBase.java | 2 +- .../TestReplicationSourceManager.java | 3 ++- 6 files changed, 22 insertions(+), 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index f14da2f6a17e..8abede5b272a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1911,8 +1911,7 @@ private void setupWALAndReplication() throws IOException { throw new IOException("Can not create wal directory " + logDir); } // Instantiate replication if replication enabled. Pass it the log directories. - createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, - factory.getWALProvider()); + createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory); } this.walFactory = factory; } @@ -3063,7 +3062,7 @@ public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() { * Load the replication executorService objects, if any */ private static void createNewReplicationInstance(Configuration conf, HRegionServer server, - FileSystem walFs, Path walDir, Path oldWALDir, WALProvider walProvider) throws IOException { + FileSystem walFs, Path walDir, Path oldWALDir, WALFactory walFactory) throws IOException { // read in the name of the source replication class from the config file. String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME, HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT); @@ -3076,19 +3075,19 @@ private static void createNewReplicationInstance(Configuration conf, HRegionServ // only one object. 
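// Note: newReplicationInstance() below resolves the named class through the thread's
// context ClassLoader and instantiates it reflectively via ReflectionUtils, passing
// in the server Configuration.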
if (sourceClassname.equals(sinkClassname)) { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); server.replicationSinkHandler = (ReplicationSinkService) server.replicationSourceHandler; } else { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); server.replicationSinkHandler = newReplicationInstance(sinkClassname, - ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walProvider); + ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walFactory); } } private static T newReplicationInstance(String classname, Class xface, Configuration conf, HRegionServer server, FileSystem walFs, Path logDir, - Path oldLogDir, WALProvider walProvider) throws IOException { + Path oldLogDir, WALFactory walFactory) throws IOException { final Class clazz; try { ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); @@ -3097,7 +3096,7 @@ private static T newReplicationInstance(String cl throw new IOException("Could not find class for " + classname); } T service = ReflectionUtils.newInstance(clazz, conf); - service.initialize(server, walFs, logDir, oldLogDir, walProvider); + service.initialize(server, walFs, logDir, oldLogDir, walFactory); return service; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index e9bbaea8ae46..33b3321755fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; -import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.yetus.audience.InterfaceAudience; /** @@ -32,14 +32,11 @@ */ @InterfaceAudience.Private public interface ReplicationService { - /** * Initializes the replication service object. - * @param walProvider can be null if not initialized inside a live region server environment, for - * example, {@code ReplicationSyncUp}. */ - void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALProvider walProvider) - throws IOException; + void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALFactory walFactory) + throws IOException; /** * Start replication services. 
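// Note: the @param javadoc removed above allowed a null WALProvider for callers
// outside a live region server; after this change every caller supplies a real
// WALFactory (ReplicationSyncUp now constructs one itself, as its hunk further
// down shows).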
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 195877bf5f3c..d8a696c7172e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.yetus.audience.InterfaceAudience; @@ -89,7 +90,7 @@ public Replication() { @Override public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir, - WALProvider walProvider) throws IOException { + WALFactory walFactory) throws IOException { this.server = server; this.conf = this.server.getConfiguration(); this.isReplicationForBulkLoadDataEnabled = @@ -128,6 +129,7 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager(); this.globalMetricsSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); + WALProvider walProvider = walFactory.getWALProvider(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId, walProvider != null ? walProvider.getWALFileLengthProvider() : p -> OptionalLong.empty(), @@ -198,7 +200,6 @@ public void join() { * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace * directory required for replicating hfiles * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory - * @throws IOException */ @Override public void replicateLogEntries(List entries, CellScanner cells, @@ -211,7 +212,6 @@ public void replicateLogEntries(List entries, CellScanner cells, /** * If replication is enabled and this cluster is a master, * it starts - * @throws IOException */ @Override public void startReplicationService() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 98490f137dbe..b04c7eb75f02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -82,7 +83,8 @@ public boolean isAborted() { System.out.println("Start Replication Server start"); Replication replication = new Replication(); - replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, null); + replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, + new WALFactory(conf, "test", false)); ReplicationSourceManager manager = replication.getReplicationManager(); manager.init().get(); while (manager.activeFailoverTaskCount() > 0) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 6e1692a9a2bd..455b27298156 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 8e38114fa0a5..4abb00fee03c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -194,7 +194,8 @@ protected static void setupZkAndReplication() throws Exception { logDir = utility.getDataTestDir(HConstants.HREGION_LOGDIR_NAME); remoteLogDir = utility.getDataTestDir(ReplicationUtils.REMOTE_WAL_DIR_NAME); replication = new Replication(); - replication.initialize(new DummyServer(), fs, logDir, oldLogDir, null); + replication.initialize(new DummyServer(), fs, logDir, oldLogDir, + new WALFactory(conf, "test", false)); managerOfCluster = getManagerFromCluster(); if (managerOfCluster != null) { // After replication procedure, we need to add peer by hand (other than by receiving From 51cbedae21d674796222ee000d971bb40a4b5c8c Mon Sep 17 00:00:00 2001 From: Guanghao Zhang Date: Wed, 23 Sep 2020 08:30:43 +0800 Subject: [PATCH 380/769] HBASE-25074 Refactor ReplicationSinkManager: reduce code and make it easy to understand (#2430) Signed-off-by: Wellington Chevreuil Signed-off-by: Duo Zhang --- .../replication/HBaseReplicationEndpoint.java | 215 +++++++++++++----- .../HBaseInterClusterReplicationEndpoint.java | 51 +---- .../regionserver/ReplicationSinkManager.java | 193 ---------------- .../TestHBaseReplicationEndpoint.java | 210 +++++++++++++++++ .../TestReplicationSinkManager.java | 210 ----------------- .../TestSerialReplicationEndpoint.java | 10 +- 6 files changed, 382 insertions(+), 507 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java create mode 100644 
hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 3cde0d5113a0..850a79125562 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -22,8 +22,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.AsyncClusterConnection; +import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; +import org.apache.hadoop.hbase.client.ClusterConnectionFactory; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; @@ -38,6 +46,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + /** * A {@link BaseReplicationEndpoint} for replication endpoints whose * target cluster is an HBase cluster. @@ -50,8 +61,58 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private ZKWatcher zkw = null; - private List regionServers = new ArrayList<>(0); - private long lastRegionServerUpdate; + protected Configuration conf; + + protected AsyncClusterConnection conn; + + /** + * Default maximum number of times a replication sink can be reported as bad before + * it will no longer be provided as a sink for replication without the pool of + * replication sinks being refreshed. + */ + public static final int DEFAULT_BAD_SINK_THRESHOLD = 3; + + /** + * Default ratio of the total number of peer cluster region servers to consider + * replicating to. + */ + public static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; + + // Ratio of total number of potential peer region servers to be used + private float ratio; + + // Maximum number of times a sink can be reported as bad before the pool of + // replication sinks is refreshed + private int badSinkThreshold; + // Count of "bad replication sink" reports per peer sink + private Map badReportCounts; + + private List sinkServers = new ArrayList<>(0); + + /* + * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different + * Connection implementations, or initialize it in a different way, so defining createConnection + * as protected for possible overridings. 
+ */ + protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { + return ClusterConnectionFactory.createAsyncClusterConnection(conf, + null, User.getCurrent()); + } + + @Override + public void init(Context context) throws IOException { + super.init(context); + this.conf = HBaseConfiguration.create(ctx.getConfiguration()); + // TODO: This connection is replication specific or we should make it particular to + // replication and make replication specific settings such as compression or codec to use + // passing Cells. + this.conn = createConnection(this.conf); + this.ratio = + ctx.getConfiguration().getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); + this.badSinkThreshold = + ctx.getConfiguration().getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD); + this.badReportCounts = Maps.newHashMap(); + } protected synchronized void disconnect() { if (zkw != null) { @@ -63,7 +124,7 @@ protected synchronized void disconnect() { * A private method used to re-establish a zookeeper session with a peer cluster. * @param ke */ - protected void reconnect(KeeperException ke) { + private void reconnect(KeeperException ke) { if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException || ke instanceof AuthFailedException) { String clusterKey = ctx.getPeerConfig().getClusterKey(); @@ -117,23 +178,17 @@ public synchronized UUID getPeerUUID() { return peerUUID; } - /** - * Get the ZK connection to this peer - * @return zk connection - */ - protected synchronized ZKWatcher getZkw() { - return zkw; - } - /** * Closes the current ZKW (if not null) and creates a new one * @throws IOException If anything goes wrong connecting */ - synchronized void reloadZkWatcher() throws IOException { - if (zkw != null) zkw.close(); + private synchronized void reloadZkWatcher() throws IOException { + if (zkw != null) { + zkw.close(); + } zkw = new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this); - getZkw().registerListener(new PeerRegionServerListener(this)); + zkw.registerListener(new PeerRegionServerListener(this)); } @Override @@ -150,13 +205,19 @@ public boolean isAborted() { /** * Get the list of all the region servers from the specified peer - * @param zkw zk connection to use + * * @return list of region server addresses or an empty list if the slave is unavailable */ - protected static List fetchSlavesAddresses(ZKWatcher zkw) - throws KeeperException { - List children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, - zkw.getZNodePaths().rsZNode); + protected List fetchSlavesAddresses() { + List children = null; + try { + children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); + } catch (KeeperException ke) { + if (LOG.isDebugEnabled()) { + LOG.debug("Fetch slaves addresses failed", ke); + } + reconnect(ke); + } if (children == null) { return Collections.emptyList(); } @@ -167,43 +228,70 @@ protected static List fetchSlavesAddresses(ZKWatcher zkw) return addresses; } + protected synchronized void chooseSinks() { + List slaveAddresses = fetchSlavesAddresses(); + if (slaveAddresses.isEmpty()) { + LOG.warn("No sinks available at peer. 
Will not be able to replicate"); + } + Collections.shuffle(slaveAddresses, ThreadLocalRandom.current()); + int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); + this.sinkServers = slaveAddresses.subList(0, numSinks); + badReportCounts.clear(); + } + + protected synchronized int getNumSinks() { + return sinkServers.size(); + } + /** - * Get a list of all the addresses of all the available region servers - * for this peer cluster, or an empty list if no region servers available at peer cluster. - * @return list of addresses + * Get a randomly-chosen replication sink to replicate to. + * @return a replication sink to replicate to */ - // Synchronize peer cluster connection attempts to avoid races and rate - // limit connections when multiple replication sources try to connect to - // the peer cluster. If the peer cluster is down we can get out of control - // over time. - public synchronized List getRegionServers() { - try { - setRegionServers(fetchSlavesAddresses(this.getZkw())); - } catch (KeeperException ke) { - if (LOG.isDebugEnabled()) { - LOG.debug("Fetch slaves addresses failed", ke); - } - reconnect(ke); + protected synchronized SinkPeer getReplicationSink() throws IOException { + if (sinkServers.isEmpty()) { + LOG.info("Current list of sinks is out of date or empty, updating"); + chooseSinks(); } - return regionServers; + if (sinkServers.isEmpty()) { + throw new IOException("No replication sinks are available"); + } + ServerName serverName = + sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size())); + return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName)); } /** - * Set the list of region servers for that peer - * @param regionServers list of addresses for the region servers + * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it + * failed). If a single SinkPeer is reported as bad more than + * replication.bad.sink.threshold times, it will be removed + * from the pool of potential replication targets. + * + * @param sinkPeer The SinkPeer that had a failed replication attempt on it */ - public synchronized void setRegionServers(List regionServers) { - this.regionServers = regionServers; - lastRegionServerUpdate = System.currentTimeMillis(); + protected synchronized void reportBadSink(SinkPeer sinkPeer) { + ServerName serverName = sinkPeer.getServerName(); + int badReportCount = badReportCounts.compute(serverName, (k, v) -> v == null ? 1 : v + 1); + if (badReportCount > badSinkThreshold) { + this.sinkServers.remove(serverName); + if (sinkServers.isEmpty()) { + chooseSinks(); + } + } } /** - * Get the timestamp at which the last change occurred to the list of region servers to replicate - * to. - * @return The System.currentTimeMillis at the last time the list of peer region servers changed. + * Report that a {@code SinkPeer} successfully replicated a chunk of data. 
+ * + * @param sinkPeer + * The SinkPeer that had a failed replication attempt on it */ - public long getLastRegionServerUpdate() { - return lastRegionServerUpdate; + protected synchronized void reportSinkSuccess(SinkPeer sinkPeer) { + badReportCounts.remove(sinkPeer.getServerName()); + } + + @VisibleForTesting + List getSinkServers() { + return sinkServers; } /** @@ -214,22 +302,39 @@ public static class PeerRegionServerListener extends ZKListener { private final HBaseReplicationEndpoint replicationEndpoint; private final String regionServerListNode; - public PeerRegionServerListener(HBaseReplicationEndpoint replicationPeer) { - super(replicationPeer.getZkw()); - this.replicationEndpoint = replicationPeer; - this.regionServerListNode = replicationEndpoint.getZkw().getZNodePaths().rsZNode; + public PeerRegionServerListener(HBaseReplicationEndpoint endpoint) { + super(endpoint.zkw); + this.replicationEndpoint = endpoint; + this.regionServerListNode = endpoint.zkw.getZNodePaths().rsZNode; } @Override public synchronized void nodeChildrenChanged(String path) { if (path.equals(regionServerListNode)) { - try { - LOG.info("Detected change to peer region servers, fetching updated list"); - replicationEndpoint.setRegionServers(fetchSlavesAddresses(replicationEndpoint.getZkw())); - } catch (KeeperException e) { - LOG.error("Error reading slave addresses", e); - } + LOG.info("Detected change to peer region servers, fetching updated list"); + replicationEndpoint.chooseSinks(); } } } + + /** + * Wraps a replication region server sink to provide the ability to identify it. + */ + public static class SinkPeer { + private ServerName serverName; + private AsyncRegionServerAdmin regionServer; + + public SinkPeer(ServerName serverName, AsyncRegionServerAdmin regionServer) { + this.serverName = serverName; + this.regionServer = regionServer; + } + + ServerName getServerName() { + return serverName; + } + + public AsyncRegionServerAdmin getRegionServer() { + return regionServer; + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 4e0669c2e9fd..b6e1f69173fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -41,7 +41,6 @@ import java.util.stream.Stream; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CellUtil; @@ -60,7 +59,6 @@ import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -100,8 +98,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi public static final String REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY = "hbase.replication.drop.on.deleted.columnfamily"; - private AsyncClusterConnection conn; - private Configuration conf; // How long 
should we sleep for each retry private long sleepForRetries; // Maximum number of retries before taking bold actions @@ -114,8 +110,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private int replicationRpcLimit; //Metrics for this source private MetricsSource metrics; - // Handles connecting to peer region servers - private ReplicationSinkManager replicationSinkMgr; private boolean peersSelected = false; private String replicationClusterId = ""; private ThreadPoolExecutor exec; @@ -130,25 +124,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi //Initialising as 0 to guarantee at least one logging message private long lastSinkFetchTime = 0; - /* - * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different - * Connection implementations, or initialize it in a different way, so defining createConnection - * as protected for possible overridings. - */ - protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { - return ClusterConnectionFactory.createAsyncClusterConnection(conf, - null, User.getCurrent()); - } - - /* - * Some implementations of HBaseInterClusterReplicationEndpoint may require instantiate different - * ReplicationSinkManager implementations, or initialize it in a different way, - * so defining createReplicationSinkManager as protected for possible overridings. - */ - protected ReplicationSinkManager createReplicationSinkManager(AsyncClusterConnection conn) { - return new ReplicationSinkManager(conn, this, this.conf); - } - @Override public void init(Context context) throws IOException { super.init(context); @@ -171,8 +146,6 @@ public void init(Context context) throws IOException { this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); - // ReplicationQueueInfo parses the peerId out of the znode for us - this.replicationSinkMgr = createReplicationSinkManager(conn); // per sink thread pool this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); @@ -211,14 +184,11 @@ private void decorateConf() { } private void connectToPeers() { - getRegionServers(); - int sleepMultiplier = 1; - // Connect to peer cluster first, unless we have to stop - while (this.isRunning() && replicationSinkMgr.getNumSinks() == 0) { - replicationSinkMgr.chooseSinks(); - if (this.isRunning() && replicationSinkMgr.getNumSinks() == 0) { + while (this.isRunning() && getNumSinks() == 0) { + chooseSinks(); + if (this.isRunning() && getNumSinks() == 0) { if (sleepForRetries("Waiting for peers", sleepMultiplier)) { sleepMultiplier++; } @@ -253,7 +223,7 @@ private int getEstimatedEntrySize(Entry e) { } private List> createParallelBatches(final List entries) { - int numSinks = Math.max(replicationSinkMgr.getNumSinks(), 1); + int numSinks = Math.max(getNumSinks(), 1); int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1), numSinks); List> entryLists = Stream.generate(ArrayList::new).limit(n).collect(Collectors.toList()); @@ -513,7 +483,7 @@ public boolean replicate(ReplicateContext replicateContext) { peersSelected = true; } - int numSinks = replicationSinkMgr.getNumSinks(); + int numSinks = getNumSinks(); if (numSinks == 0) { if((System.currentTimeMillis() - lastSinkFetchTime) >= (maxRetriesMultiplier*1000)) { LOG.warn( @@ -561,7 +531,7 @@ public boolean replicate(ReplicateContext replicateContext) { } else { LOG.warn("{} 
Peer encountered RemoteException, rechecking all sinks: ", logPeerId(), ioe); - replicationSinkMgr.chooseSinks(); + chooseSinks(); } } else { if (ioe instanceof SocketTimeoutException) { @@ -574,7 +544,7 @@ public boolean replicate(ReplicateContext replicateContext) { this.socketTimeoutMultiplier); } else if (ioe instanceof ConnectException || ioe instanceof UnknownHostException) { LOG.warn("{} Peer is unavailable, rechecking all sinks: ", logPeerId(), ioe); - replicationSinkMgr.chooseSinks(); + chooseSinks(); } else { LOG.warn("{} Can't replicate because of a local or network error: ", logPeerId(), ioe); } @@ -629,7 +599,7 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) LOG.trace("{} Replicating batch {} of {} entries with total size {} bytes to {}", logPeerId(), entriesHashCode, entries.size(), size, replicationClusterId); } - sinkPeer = replicationSinkMgr.getReplicationSink(); + sinkPeer = getReplicationSink(); AsyncRegionServerAdmin rsAdmin = sinkPeer.getRegionServer(); try { ReplicationProtobufUtil.replicateWALEntry(rsAdmin, @@ -644,10 +614,10 @@ protected int replicateEntries(List entries, int batchIndex, int timeout) } throw e; } - replicationSinkMgr.reportSinkSuccess(sinkPeer); + reportSinkSuccess(sinkPeer); } catch (IOException ioe) { if (sinkPeer != null) { - replicationSinkMgr.reportBadSink(sinkPeer); + reportBadSink(sinkPeer); } throw ioe; } @@ -683,5 +653,4 @@ protected Callable createReplicator(List entries, int batchIndex private String logPeerId(){ return "[Source for peer " + this.ctx.getPeerId() + "]:"; } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java deleted file mode 100644 index db12dc0a6fdf..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.replication.regionserver; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ThreadLocalRandom; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; -import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; - -/** - * Maintains a collection of peers to replicate to, and randomly selects a - * single peer to replicate to per set of data to replicate. Also handles - * keeping track of peer availability. - */ -@InterfaceAudience.Private -public class ReplicationSinkManager { - - private static final Logger LOG = LoggerFactory.getLogger(ReplicationSinkManager.class); - - /** - * Default maximum number of times a replication sink can be reported as bad before - * it will no longer be provided as a sink for replication without the pool of - * replication sinks being refreshed. - */ - static final int DEFAULT_BAD_SINK_THRESHOLD = 3; - - /** - * Default ratio of the total number of peer cluster region servers to consider - * replicating to. - */ - static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; - - - private final AsyncClusterConnection conn; - - private final HBaseReplicationEndpoint endpoint; - - // Count of "bad replication sink" reports per peer sink - private final Map badReportCounts; - - // Ratio of total number of potential peer region servers to be used - private final float ratio; - - // Maximum number of times a sink can be reported as bad before the pool of - // replication sinks is refreshed - private final int badSinkThreshold; - - // A timestamp of the last time the list of replication peers changed - private long lastUpdateToPeers; - - // The current pool of sinks to which replication can be performed - private List sinks = Lists.newArrayList(); - - /** - * Instantiate for a single replication peer cluster. - * @param conn connection to the peer cluster - * @param endpoint replication endpoint for inter cluster replication - * @param conf HBase configuration, used for determining replication source ratio and bad peer - * threshold - */ - public ReplicationSinkManager(AsyncClusterConnection conn, HBaseReplicationEndpoint endpoint, - Configuration conf) { - this.conn = conn; - this.endpoint = endpoint; - this.badReportCounts = Maps.newHashMap(); - this.ratio = conf.getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); - this.badSinkThreshold = - conf.getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD); - } - - /** - * Get a randomly-chosen replication sink to replicate to. 
- * @return a replication sink to replicate to - */ - public synchronized SinkPeer getReplicationSink() throws IOException { - if (endpoint.getLastRegionServerUpdate() > this.lastUpdateToPeers || sinks.isEmpty()) { - LOG.info("Current list of sinks is out of date or empty, updating"); - chooseSinks(); - } - - if (sinks.isEmpty()) { - throw new IOException("No replication sinks are available"); - } - ServerName serverName = sinks.get(ThreadLocalRandom.current().nextInt(sinks.size())); - return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName)); - } - - /** - * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it - * failed). If a single SinkPeer is reported as bad more than - * replication.bad.sink.threshold times, it will be removed - * from the pool of potential replication targets. - * - * @param sinkPeer - * The SinkPeer that had a failed replication attempt on it - */ - public synchronized void reportBadSink(SinkPeer sinkPeer) { - ServerName serverName = sinkPeer.getServerName(); - int badReportCount = (badReportCounts.containsKey(serverName) - ? badReportCounts.get(serverName) : 0) + 1; - badReportCounts.put(serverName, badReportCount); - if (badReportCount > badSinkThreshold) { - this.sinks.remove(serverName); - if (sinks.isEmpty()) { - chooseSinks(); - } - } - } - - /** - * Report that a {@code SinkPeer} successfully replicated a chunk of data. - * - * @param sinkPeer - * The SinkPeer that had a failed replication attempt on it - */ - public synchronized void reportSinkSuccess(SinkPeer sinkPeer) { - badReportCounts.remove(sinkPeer.getServerName()); - } - - /** - * Refresh the list of sinks. - */ - public synchronized void chooseSinks() { - List slaveAddresses = endpoint.getRegionServers(); - if(slaveAddresses.isEmpty()){ - LOG.warn("No sinks available at peer. Will not be able to replicate"); - } - Collections.shuffle(slaveAddresses, ThreadLocalRandom.current()); - int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); - sinks = slaveAddresses.subList(0, numSinks); - lastUpdateToPeers = System.currentTimeMillis(); - badReportCounts.clear(); - } - - public synchronized int getNumSinks() { - return sinks.size(); - } - - @VisibleForTesting - protected List getSinksForTesting() { - return Collections.unmodifiableList(sinks); - } - - /** - * Wraps a replication region server sink to provide the ability to identify - * it. - */ - public static class SinkPeer { - private ServerName serverName; - private AsyncRegionServerAdmin regionServer; - - public SinkPeer(ServerName serverName, AsyncRegionServerAdmin regionServer) { - this.serverName = serverName; - this.regionServer = regionServer; - } - - ServerName getServerName() { - return serverName; - } - - public AsyncRegionServerAdmin getRegionServer() { - return regionServer; - } - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java new file mode 100644 index 000000000000..41601417a9d4 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java @@ -0,0 +1,210 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AsyncClusterConnection; +import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; +import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.SinkPeer; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +@Category({ReplicationTests.class, SmallTests.class}) +public class TestHBaseReplicationEndpoint { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestHBaseReplicationEndpoint.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestHBaseReplicationEndpoint.class); + + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private HBaseReplicationEndpoint endpoint; + + @Before + public void setUp() throws Exception { + try { + ReplicationEndpoint.Context context = + new ReplicationEndpoint.Context(null, UTIL.getConfiguration(), UTIL.getConfiguration(), + null, null, null, null, null, null, null); + endpoint = new DummyHBaseReplicationEndpoint(); + endpoint.init(context); + } catch (Exception e) { + LOG.info("Failed", e); + } + } + + @Test + public void testChooseSinks() { + List serverNames = Lists.newArrayList(); + int totalServers = 20; + for (int i = 0; i < totalServers; i++) { + serverNames.add(mock(ServerName.class)); + } + ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames); + endpoint.chooseSinks(); + int expected = (int) (totalServers * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO); + assertEquals(expected, endpoint.getNumSinks()); + } + + @Test + public void testChooseSinksLessThanRatioAvailable() { + List serverNames = Lists.newArrayList(mock(ServerName.class), + mock(ServerName.class)); + ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames); + endpoint.chooseSinks(); + assertEquals(1, endpoint.getNumSinks()); + } + + @Test + public void testReportBadSink() { + ServerName serverNameA = mock(ServerName.class); + ServerName serverNameB = mock(ServerName.class); + ((DummyHBaseReplicationEndpoint) endpoint) + .setRegionServers(Lists.newArrayList(serverNameA, serverNameB)); + endpoint.chooseSinks(); + // Sanity check + assertEquals(1, endpoint.getNumSinks()); + + SinkPeer sinkPeer = new 
SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class)); + endpoint.reportBadSink(sinkPeer); + // Just reporting a bad sink once shouldn't have an effect + assertEquals(1, endpoint.getNumSinks()); + } + + /** + * Once a SinkPeer has been reported as bad more than BAD_SINK_THRESHOLD times, it should not + * be replicated to anymore. + */ + @Test + public void testReportBadSinkPastThreshold() { + List serverNames = Lists.newArrayList(); + int totalServers = 30; + for (int i = 0; i < totalServers; i++) { + serverNames.add(mock(ServerName.class)); + } + ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames); + endpoint.chooseSinks(); + // Sanity check + int expected = (int) (totalServers * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO); + assertEquals(expected, endpoint.getNumSinks()); + + ServerName badSinkServer0 = endpoint.getSinkServers().get(0); + SinkPeer sinkPeer = new SinkPeer(badSinkServer0, mock(AsyncRegionServerAdmin.class)); + for (int i = 0; i <= HBaseReplicationEndpoint.DEFAULT_BAD_SINK_THRESHOLD; i++) { + endpoint.reportBadSink(sinkPeer); + } + // Reporting a bad sink more than the threshold count should remove it + // from the list of potential sinks + assertEquals(expected - 1, endpoint.getNumSinks()); + + // now try a sink that has some successes + ServerName badSinkServer1 = endpoint.getSinkServers().get(0); + sinkPeer = new SinkPeer(badSinkServer1, mock(AsyncRegionServerAdmin.class)); + for (int i = 0; i < HBaseReplicationEndpoint.DEFAULT_BAD_SINK_THRESHOLD; i++) { + endpoint.reportBadSink(sinkPeer); + } + endpoint.reportSinkSuccess(sinkPeer); // one success + endpoint.reportBadSink(sinkPeer); + // did not remove the sink, since we had one successful try + assertEquals(expected - 1, endpoint.getNumSinks()); + + for (int i = 0; i < HBaseReplicationEndpoint.DEFAULT_BAD_SINK_THRESHOLD - 1; i++) { + endpoint.reportBadSink(sinkPeer); + } + // still not remove, since the success reset the counter + assertEquals(expected - 1, endpoint.getNumSinks()); + endpoint.reportBadSink(sinkPeer); + // but we exhausted the tries + assertEquals(expected - 2, endpoint.getNumSinks()); + } + + @Test + public void testReportBadSinkDownToZeroSinks() { + List serverNames = Lists.newArrayList(); + int totalServers = 4; + for (int i = 0; i < totalServers; i++) { + serverNames.add(mock(ServerName.class)); + } + ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames); + endpoint.chooseSinks(); + // Sanity check + int expected = (int) (totalServers * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO); + assertEquals(expected, endpoint.getNumSinks()); + + ServerName serverNameA = endpoint.getSinkServers().get(0); + ServerName serverNameB = endpoint.getSinkServers().get(1); + + SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class)); + SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AsyncRegionServerAdmin.class)); + + for (int i = 0; i <= HBaseReplicationEndpoint.DEFAULT_BAD_SINK_THRESHOLD; i++) { + endpoint.reportBadSink(sinkPeerA); + endpoint.reportBadSink(sinkPeerB); + } + + // We've gone down to 0 good sinks, so the replication sinks + // should have been refreshed now, so out of 4 servers, 2 are not considered as they are + // reported as bad. 
+ expected = + (int) ((totalServers - 2) * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO); + assertEquals(expected, endpoint.getNumSinks()); + } + + private static class DummyHBaseReplicationEndpoint extends HBaseReplicationEndpoint { + + List regionServers; + + public void setRegionServers(List regionServers) { + this.regionServers = regionServers; + } + + @Override + public List fetchSlavesAddresses() { + return regionServers; + } + + @Override + public boolean replicate(ReplicateContext replicateContext) { + return false; + } + + @Override + public AsyncClusterConnection createConnection(Configuration conf) throws IOException { + return null; + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java deleted file mode 100644 index f8a2ab917605..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.replication.regionserver; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.mock; - -import java.util.List; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; -import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer; -import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; - -@Category({ReplicationTests.class, SmallTests.class}) -public class TestReplicationSinkManager { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationSinkManager.class); - - private ReplicationSinkManager sinkManager; - private HBaseReplicationEndpoint replicationEndpoint; - - /** - * Manage the 'getRegionServers' for the tests below. Override the base class handling - * of Regionservers. We used to use a mock for this but updated guava/errorprone disallows - * mocking of classes that implement Service. 
- */ - private static class SetServersHBaseReplicationEndpoint extends HBaseReplicationEndpoint { - List regionServers; - - @Override - public boolean replicate(ReplicateContext replicateContext) { - return false; - } - - @Override - public synchronized void setRegionServers(List regionServers) { - this.regionServers = regionServers; - } - - @Override - public List getRegionServers() { - return this.regionServers; - } - } - - @Before - public void setUp() { - this.replicationEndpoint = new SetServersHBaseReplicationEndpoint(); - this.sinkManager = new ReplicationSinkManager(mock(AsyncClusterConnection.class), - replicationEndpoint, new Configuration()); - } - - @Test - public void testChooseSinks() { - List serverNames = Lists.newArrayList(); - int totalServers = 20; - for (int i = 0; i < totalServers; i++) { - serverNames.add(mock(ServerName.class)); - } - replicationEndpoint.setRegionServers(serverNames); - sinkManager.chooseSinks(); - int expected = (int) (totalServers * ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO); - assertEquals(expected, sinkManager.getNumSinks()); - - } - - @Test - public void testChooseSinks_LessThanRatioAvailable() { - List serverNames = Lists.newArrayList(mock(ServerName.class), - mock(ServerName.class)); - replicationEndpoint.setRegionServers(serverNames); - sinkManager.chooseSinks(); - assertEquals(1, sinkManager.getNumSinks()); - } - - @Test - public void testReportBadSink() { - ServerName serverNameA = mock(ServerName.class); - ServerName serverNameB = mock(ServerName.class); - replicationEndpoint.setRegionServers(Lists.newArrayList(serverNameA, serverNameB)); - sinkManager.chooseSinks(); - // Sanity check - assertEquals(1, sinkManager.getNumSinks()); - - SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class)); - - sinkManager.reportBadSink(sinkPeer); - - // Just reporting a bad sink once shouldn't have an effect - assertEquals(1, sinkManager.getNumSinks()); - - } - - /** - * Once a SinkPeer has been reported as bad more than BAD_SINK_THRESHOLD times, it should not - * be replicated to anymore. 
- */ - @Test - public void testReportBadSink_PastThreshold() { - List serverNames = Lists.newArrayList(); - int totalServers = 30; - for (int i = 0; i < totalServers; i++) { - serverNames.add(mock(ServerName.class)); - } - replicationEndpoint.setRegionServers(serverNames); - sinkManager.chooseSinks(); - // Sanity check - int expected = (int) (totalServers * ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO); - assertEquals(expected, sinkManager.getNumSinks()); - - ServerName serverName = sinkManager.getSinksForTesting().get(0); - - SinkPeer sinkPeer = new SinkPeer(serverName, mock(AsyncRegionServerAdmin.class)); - - sinkManager.reportSinkSuccess(sinkPeer); // has no effect, counter does not go negative - for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) { - sinkManager.reportBadSink(sinkPeer); - } - - // Reporting a bad sink more than the threshold count should remove it - // from the list of potential sinks - assertEquals(expected - 1, sinkManager.getNumSinks()); - - // - // now try a sink that has some successes - // - serverName = sinkManager.getSinksForTesting().get(0); - - sinkPeer = new SinkPeer(serverName, mock(AsyncRegionServerAdmin.class)); - for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD-1; i++) { - sinkManager.reportBadSink(sinkPeer); - } - sinkManager.reportSinkSuccess(sinkPeer); // one success - sinkManager.reportBadSink(sinkPeer); - - // did not remove the sink, since we had one successful try - assertEquals(expected - 1, sinkManager.getNumSinks()); - - for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD-2; i++) { - sinkManager.reportBadSink(sinkPeer); - } - // still not remove, since the success reset the counter - assertEquals(expected - 1, sinkManager.getNumSinks()); - - sinkManager.reportBadSink(sinkPeer); - // but we exhausted the tries - assertEquals(expected - 2, sinkManager.getNumSinks()); - } - - @Test - public void testReportBadSink_DownToZeroSinks() { - List serverNames = Lists.newArrayList(); - int totalServers = 4; - for (int i = 0; i < totalServers; i++) { - serverNames.add(mock(ServerName.class)); - } - replicationEndpoint.setRegionServers(serverNames); - sinkManager.chooseSinks(); - // Sanity check - List sinkList = sinkManager.getSinksForTesting(); - int expected = (int) (totalServers * ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO); - assertEquals(expected, sinkList.size()); - - ServerName serverNameA = sinkList.get(0); - ServerName serverNameB = sinkList.get(1); - - SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class)); - SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AsyncRegionServerAdmin.class)); - - for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) { - sinkManager.reportBadSink(sinkPeerA); - sinkManager.reportBadSink(sinkPeerB); - } - - // We've gone down to 0 good sinks, so the replication sinks - // should have been refreshed now, so out of 4 servers, 2 are not considered as they are - // reported as bad. 
- expected = (int) ((totalServers - 2) * ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO); - assertEquals(expected, sinkManager.getNumSinks()); - } - -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java index 3c88ab315919..090129174cca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; @@ -175,14 +174,9 @@ protected Callable createReplicator(List entries, int ordinal, i } @Override - public synchronized List getRegionServers() { + public synchronized int getNumSinks() { // Return multiple server names for endpoint parallel replication. - return new ArrayList<>( - ImmutableList.of(ServerName.valueOf("www.example.com", 12016, 1525245876026L), - ServerName.valueOf("www.example2.com", 12016, 1525245876026L), - ServerName.valueOf("www.example3.com", 12016, 1525245876026L), - ServerName.valueOf("www.example4.com", 12016, 1525245876026L), - ServerName.valueOf("www.example4.com", 12016, 1525245876026L))); + return 10; } } } From 9fa2f3c3a56c5e28e95a5e65295f62092357d95e Mon Sep 17 00:00:00 2001 From: stack Date: Tue, 22 Sep 2020 20:48:31 -0700 Subject: [PATCH 381/769] Revert "HBASE-25068 Pass WALFactory to Replication so it knows of all WALProviders, not just default/user-space" This reverts commit 17ebf917ba354e4632b726323b2b32af3aa6c8de. --- .../hadoop/hbase/regionserver/HRegionServer.java | 15 ++++++++------- .../hbase/regionserver/ReplicationService.java | 11 +++++++---- .../replication/regionserver/Replication.java | 8 ++++---- .../regionserver/ReplicationSyncUp.java | 6 ++---- .../hbase/replication/TestReplicationBase.java | 2 +- .../TestReplicationSourceManager.java | 3 +-- 6 files changed, 23 insertions(+), 22 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8abede5b272a..f14da2f6a17e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1911,7 +1911,8 @@ private void setupWALAndReplication() throws IOException { throw new IOException("Can not create wal directory " + logDir); } // Instantiate replication if replication enabled. Pass it the log directories. 
- createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory); + createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, + factory.getWALProvider()); } this.walFactory = factory; } @@ -3062,7 +3063,7 @@ public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() { * Load the replication executorService objects, if any */ private static void createNewReplicationInstance(Configuration conf, HRegionServer server, - FileSystem walFs, Path walDir, Path oldWALDir, WALFactory walFactory) throws IOException { + FileSystem walFs, Path walDir, Path oldWALDir, WALProvider walProvider) throws IOException { // read in the name of the source replication class from the config file. String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME, HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT); @@ -3075,19 +3076,19 @@ private static void createNewReplicationInstance(Configuration conf, HRegionServ // only one object. if (sourceClassname.equals(sinkClassname)) { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); server.replicationSinkHandler = (ReplicationSinkService) server.replicationSourceHandler; } else { server.replicationSourceHandler = newReplicationInstance(sourceClassname, - ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory); + ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walProvider); server.replicationSinkHandler = newReplicationInstance(sinkClassname, - ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walFactory); + ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walProvider); } } private static T newReplicationInstance(String classname, Class xface, Configuration conf, HRegionServer server, FileSystem walFs, Path logDir, - Path oldLogDir, WALFactory walFactory) throws IOException { + Path oldLogDir, WALProvider walProvider) throws IOException { final Class clazz; try { ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); @@ -3096,7 +3097,7 @@ private static T newReplicationInstance(String cl throw new IOException("Could not find class for " + classname); } T service = ReflectionUtils.newInstance(clazz, conf); - service.initialize(server, walFs, logDir, oldLogDir, walFactory); + service.initialize(server, walFs, logDir, oldLogDir, walProvider); return service; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index 33b3321755fa..e9bbaea8ae46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.yetus.audience.InterfaceAudience; /** @@ -32,11 +32,14 @@ */ @InterfaceAudience.Private public interface ReplicationService { + /** * Initializes the replication service object. + * @param walProvider can be null if not initialized inside a live region server environment, for + * example, {@code ReplicationSyncUp}. */ - void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALFactory walFactory) - throws IOException; + void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALProvider walProvider) + throws IOException; /** * Start replication services. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index d8a696c7172e..195877bf5f3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; -import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.yetus.audience.InterfaceAudience; @@ -90,7 +89,7 @@ public Replication() { @Override public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir, - WALFactory walFactory) throws IOException { + WALProvider walProvider) throws IOException { this.server = server; this.conf = this.server.getConfiguration(); this.isReplicationForBulkLoadDataEnabled = @@ -129,7 +128,6 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager(); this.globalMetricsSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); - WALProvider walProvider = walFactory.getWALProvider(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId, walProvider != null ? 
walProvider.getWALFileLengthProvider() : p -> OptionalLong.empty(), @@ -200,6 +198,7 @@ public void join() { * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace * directory required for replicating hfiles * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory + * @throws IOException */ @Override public void replicateLogEntries(List entries, CellScanner cells, @@ -212,6 +211,7 @@ public void replicateLogEntries(List entries, CellScanner cells, /** * If replication is enabled and this cluster is a master, * it starts + * @throws IOException */ @Override public void startReplicationService() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index b04c7eb75f02..98490f137dbe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -83,8 +82,7 @@ public boolean isAborted() { System.out.println("Start Replication Server start"); Replication replication = new Replication(); - replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, - new WALFactory(conf, "test", false)); + replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, null); ReplicationSourceManager manager = replication.getReplicationManager(); manager.init().get(); while (manager.activeFailoverTaskCount() > 0) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 455b27298156..6e1692a9a2bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file
  * distributed with this work for additional information
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 4abb00fee03c..8e38114fa0a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -194,8 +194,7 @@ protected static void setupZkAndReplication() throws Exception {
     logDir = utility.getDataTestDir(HConstants.HREGION_LOGDIR_NAME);
     remoteLogDir = utility.getDataTestDir(ReplicationUtils.REMOTE_WAL_DIR_NAME);
     replication = new Replication();
-    replication.initialize(new DummyServer(), fs, logDir, oldLogDir,
-      new WALFactory(conf, "test", false));
+    replication.initialize(new DummyServer(), fs, logDir, oldLogDir, null);
     managerOfCluster = getManagerFromCluster();
     if (managerOfCluster != null) {
       // After replication procedure, we need to add peer by hand (other than by receiving

From 82f3301926c012d2be8c3be9ed0fa059a398054f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E4=BC=9F?=
Date: Wed, 23 Sep 2020 14:36:00 +0800
Subject: [PATCH 382/769] Should not use XXXService.Interface.class.getSimpleName as stub key prefix in AsyncConnectionImpl (#2443)

Signed-off-by: Duo Zhang
---
 .../org/apache/hadoop/hbase/client/AsyncConnectionImpl.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 97b70e1a7ad8..406af0d4fdd6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -254,7 +254,7 @@ private ClientService.Interface createRegionServerStub(ServerName serverName) th
 
   ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException {
     return ConcurrentMapUtils.computeIfAbsentEx(rsStubs,
-      getStubKey(ClientService.Interface.class.getSimpleName(), serverName, hostnameCanChange),
+      getStubKey(ClientService.getDescriptor().getName(), serverName, hostnameCanChange),
       () -> createRegionServerStub(serverName));
   }
 
@@ -268,7 +268,7 @@ private AdminService.Interface createAdminServerStub(ServerName serverName) thro
 
   AdminService.Interface getAdminStub(ServerName serverName) throws IOException {
     return ConcurrentMapUtils.computeIfAbsentEx(adminSubs,
-      getStubKey(AdminService.Interface.class.getSimpleName(), serverName, hostnameCanChange),
+      getStubKey(AdminService.getDescriptor().getName(), serverName, hostnameCanChange),
       () -> createAdminServerStub(serverName));
   }
 

From cb7ede600b8318a0c6a81d7393cafed585c8a1a2 Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Wed, 23 Sep 2020 14:37:27 +0800
Subject: [PATCH 383/769] Revert "Should not use XXXService.Interface.class.getSimpleName as stub key prefix in AsyncConnectionImpl (#2443)"

This reverts commit 98225ff1a369add92f71e5ee04052fdd8875c2f8.
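The revert is administrative: the identical change is re-applied immediately below as HBASE-25073, this time with the issue id in the subject line. The underlying fix matters because each protobuf-generated service stub nests an interface literally named Interface, so Interface.class.getSimpleName() evaluates to the same string "Interface" for ClientService and AdminService alike, and the two stub caches end up sharing a key prefix, while the service descriptor's name ("ClientService", "AdminService") is unique per service. A minimal, self-contained sketch of the collision (hypothetical demo class, not HBase code):

    public class StubKeyPrefixDemo {
      // Stand-ins for the generated protobuf services; each nests an
      // interface named "Interface", mirroring the generated HBase stubs.
      interface ClientService { interface Interface {} }
      interface AdminService { interface Interface {} }

      public static void main(String[] args) {
        // Both lines print "Interface", so stub-cache keys built from this
        // prefix (e.g. "Interface@host:port") collide across services.
        System.out.println(ClientService.Interface.class.getSimpleName());
        System.out.println(AdminService.Interface.class.getSimpleName());
      }
    }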
---
 .../org/apache/hadoop/hbase/client/AsyncConnectionImpl.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 406af0d4fdd6..97b70e1a7ad8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -254,7 +254,7 @@ private ClientService.Interface createRegionServerStub(ServerName serverName) th
 
   ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException {
     return ConcurrentMapUtils.computeIfAbsentEx(rsStubs,
-      getStubKey(ClientService.getDescriptor().getName(), serverName, hostnameCanChange),
+      getStubKey(ClientService.Interface.class.getSimpleName(), serverName, hostnameCanChange),
       () -> createRegionServerStub(serverName));
   }
 
@@ -268,7 +268,7 @@ private AdminService.Interface createAdminServerStub(ServerName serverName) thro
 
   AdminService.Interface getAdminStub(ServerName serverName) throws IOException {
     return ConcurrentMapUtils.computeIfAbsentEx(adminSubs,
-      getStubKey(AdminService.getDescriptor().getName(), serverName, hostnameCanChange),
+      getStubKey(AdminService.Interface.class.getSimpleName(), serverName, hostnameCanChange),
       () -> createAdminServerStub(serverName));
   }
 

From a322c489060c3bef7d18c4fd38e264a1e7434612 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E4=BC=9F?=
Date: Wed, 23 Sep 2020 14:36:00 +0800
Subject: [PATCH 384/769] HBASE-25073 Should not use XXXService.Interface.class.getSimpleName as stub key prefix in AsyncConnectionImpl (#2443)

Signed-off-by: Duo Zhang
---
 .../org/apache/hadoop/hbase/client/AsyncConnectionImpl.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 97b70e1a7ad8..406af0d4fdd6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -254,7 +254,7 @@ private ClientService.Interface createRegionServerStub(ServerName serverName) th
 
   ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException {
     return ConcurrentMapUtils.computeIfAbsentEx(rsStubs,
-      getStubKey(ClientService.Interface.class.getSimpleName(), serverName, hostnameCanChange),
+      getStubKey(ClientService.getDescriptor().getName(), serverName, hostnameCanChange),
       () -> createRegionServerStub(serverName));
   }
 
@@ -268,7 +268,7 @@ private AdminService.Interface createAdminServerStub(ServerName serverName) thro
 
   AdminService.Interface getAdminStub(ServerName serverName) throws IOException {
     return ConcurrentMapUtils.computeIfAbsentEx(adminSubs,
-      getStubKey(AdminService.Interface.class.getSimpleName(), serverName, hostnameCanChange),
+      getStubKey(AdminService.getDescriptor().getName(), serverName, hostnameCanChange),
       () -> createAdminServerStub(serverName));
   }
 

From 21821e402e3ea9498eb71afff11c9853b8811be4 Mon Sep 17 00:00:00 2001
From: Andrew Purtell
Date: Wed, 23 Sep 2020 14:56:50 -0700
Subject: [PATCH 385/769] HBASE-25079 Upgrade Bootstrap to 3.3.7 (#2442)

Signed-off-by: Viraj Jasani
---
 .../static/css/bootstrap-theme.css | 394 -
 .../static/css/bootstrap-theme.min.css | 14 +-
.../hbase-webapps/static/css/bootstrap.css | 6805 ----------------- .../static/css/bootstrap.min.css | 13 +- .../fonts/glyphicons-halflings-regular.eot | Bin 14079 -> 20127 bytes .../fonts/glyphicons-halflings-regular.svg | 480 +- .../fonts/glyphicons-halflings-regular.ttf | Bin 29512 -> 45404 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 16448 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../hbase-webapps/static/js/bootstrap.js | 1999 ----- .../hbase-webapps/static/js/bootstrap.min.js | 13 +- .../static/css/bootstrap-theme.css | 394 - .../static/css/bootstrap-theme.min.css | 14 +- .../hbase-webapps/static/css/bootstrap.css | 6805 ----------------- .../static/css/bootstrap.min.css | 13 +- .../fonts/glyphicons-halflings-regular.eot | Bin 14079 -> 20127 bytes .../fonts/glyphicons-halflings-regular.svg | 480 +- .../fonts/glyphicons-halflings-regular.ttf | Bin 29512 -> 45404 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 16448 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../hbase-webapps/static/js/bootstrap.js | 1999 ----- .../hbase-webapps/static/js/bootstrap.min.js | 13 +- pom.xml | 2 + 23 files changed, 576 insertions(+), 18862 deletions(-) delete mode 100755 hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css delete mode 100755 hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.svg mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff create mode 100644 hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff2 delete mode 100755 hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js mode change 100755 => 100644 hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.min.js delete mode 100755 hbase-thrift/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css delete mode 100755 hbase-thrift/src/main/resources/hbase-webapps/static/css/bootstrap.css mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/css/bootstrap.min.css mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.svg mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf mode change 100755 => 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff create mode 100644 hbase-thrift/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff2 delete mode 100755 hbase-thrift/src/main/resources/hbase-webapps/static/js/bootstrap.js mode change 100755 => 
100644 hbase-thrift/src/main/resources/hbase-webapps/static/js/bootstrap.min.js diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css deleted file mode 100755 index 10c9ff578722..000000000000 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.css +++ /dev/null @@ -1,394 +0,0 @@ -/*! - * Bootstrap v3.0.0 - * - * Copyright 2013 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world by @mdo and @fat. - */ - -.btn-default, -.btn-primary, -.btn-success, -.btn-info, -.btn-warning, -.btn-danger { - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); -} - -.btn-default:active, -.btn-primary:active, -.btn-success:active, -.btn-info:active, -.btn-warning:active, -.btn-danger:active, -.btn-default.active, -.btn-primary.active, -.btn-success.active, -.btn-info.active, -.btn-warning.active, -.btn-danger.active { - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} - -.btn:active, -.btn.active { - background-image: none; -} - -.btn-default { - text-shadow: 0 1px 0 #fff; - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#e6e6e6)); - background-image: -webkit-linear-gradient(top, #ffffff, 0%, #e6e6e6, 100%); - background-image: -moz-linear-gradient(top, #ffffff 0%, #e6e6e6 100%); - background-image: linear-gradient(to bottom, #ffffff 0%, #e6e6e6 100%); - background-repeat: repeat-x; - border-color: #e0e0e0; - border-color: #ccc; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe6e6e6', GradientType=0); -} - -.btn-default:active, -.btn-default.active { - background-color: #e6e6e6; - border-color: #e0e0e0; -} - -.btn-primary { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #3071a9, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%); - background-repeat: repeat-x; - border-color: #2d6ca2; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0); -} - -.btn-primary:active, -.btn-primary.active { - background-color: #3071a9; - border-color: #2d6ca2; -} - -.btn-success { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44)); - background-image: -webkit-linear-gradient(top, #5cb85c, 0%, #449d44, 100%); - background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%); - background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); - background-repeat: repeat-x; - border-color: #419641; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); -} - -.btn-success:active, -.btn-success.active { - background-color: #449d44; - border-color: #419641; -} - -.btn-warning { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f)); - background-image: -webkit-linear-gradient(top, #f0ad4e, 0%, #ec971f, 100%); - background-image: 
-moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); - background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); - background-repeat: repeat-x; - border-color: #eb9316; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); -} - -.btn-warning:active, -.btn-warning.active { - background-color: #ec971f; - border-color: #eb9316; -} - -.btn-danger { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c)); - background-image: -webkit-linear-gradient(top, #d9534f, 0%, #c9302c, 100%); - background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%); - background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); - background-repeat: repeat-x; - border-color: #c12e2a; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); -} - -.btn-danger:active, -.btn-danger.active { - background-color: #c9302c; - border-color: #c12e2a; -} - -.btn-info { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5)); - background-image: -webkit-linear-gradient(top, #5bc0de, 0%, #31b0d5, 100%); - background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); - background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); - background-repeat: repeat-x; - border-color: #2aabd2; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); -} - -.btn-info:active, -.btn-info.active { - background-color: #31b0d5; - border-color: #2aabd2; -} - -.thumbnail, -.img-thumbnail { - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); -} - -.dropdown-menu > li > a:hover, -.dropdown-menu > li > a:focus, -.dropdown-menu > .active > a, -.dropdown-menu > .active > a:hover, -.dropdown-menu > .active > a:focus { - background-color: #357ebd; - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #357ebd, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0); -} - -.navbar { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#f8f8f8)); - background-image: -webkit-linear-gradient(top, #ffffff, 0%, #f8f8f8, 100%); - background-image: -moz-linear-gradient(top, #ffffff 0%, #f8f8f8 100%); - background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%); - background-repeat: repeat-x; - border-radius: 4px; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); -} - -.navbar .navbar-nav > .active > a { - background-color: #f8f8f8; -} - -.navbar-brand, -.navbar-nav > li > a { - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25); -} - -.navbar-inverse { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#3c3c3c), to(#222222)); - background-image: -webkit-linear-gradient(top, #3c3c3c, 0%, #222222, 100%); - background-image: -moz-linear-gradient(top, 
#3c3c3c 0%, #222222 100%); - background-image: linear-gradient(to bottom, #3c3c3c 0%, #222222 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0); -} - -.navbar-inverse .navbar-nav > .active > a { - background-color: #222222; -} - -.navbar-inverse .navbar-brand, -.navbar-inverse .navbar-nav > li > a { - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); -} - -.navbar-static-top, -.navbar-fixed-top, -.navbar-fixed-bottom { - border-radius: 0; -} - -.alert { - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05); -} - -.alert-success { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#c8e5bc)); - background-image: -webkit-linear-gradient(top, #dff0d8, 0%, #c8e5bc, 100%); - background-image: -moz-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%); - background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%); - background-repeat: repeat-x; - border-color: #b2dba1; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0); -} - -.alert-info { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#b9def0)); - background-image: -webkit-linear-gradient(top, #d9edf7, 0%, #b9def0, 100%); - background-image: -moz-linear-gradient(top, #d9edf7 0%, #b9def0 100%); - background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%); - background-repeat: repeat-x; - border-color: #9acfea; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0); -} - -.alert-warning { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#f8efc0)); - background-image: -webkit-linear-gradient(top, #fcf8e3, 0%, #f8efc0, 100%); - background-image: -moz-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%); - background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%); - background-repeat: repeat-x; - border-color: #f5e79e; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0); -} - -.alert-danger { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#e7c3c3)); - background-image: -webkit-linear-gradient(top, #f2dede, 0%, #e7c3c3, 100%); - background-image: -moz-linear-gradient(top, #f2dede 0%, #e7c3c3 100%); - background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%); - background-repeat: repeat-x; - border-color: #dca7a7; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0); -} - -.progress { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ebebeb), to(#f5f5f5)); - background-image: -webkit-linear-gradient(top, #ebebeb, 0%, #f5f5f5, 100%); - background-image: -moz-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%); - background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0); -} - -.progress-bar { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #3071a9, 100%); - 
background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0); -} - -.progress-bar-success { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44)); - background-image: -webkit-linear-gradient(top, #5cb85c, 0%, #449d44, 100%); - background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%); - background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); -} - -.progress-bar-info { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5)); - background-image: -webkit-linear-gradient(top, #5bc0de, 0%, #31b0d5, 100%); - background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); - background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); -} - -.progress-bar-warning { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f)); - background-image: -webkit-linear-gradient(top, #f0ad4e, 0%, #ec971f, 100%); - background-image: -moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); - background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); -} - -.progress-bar-danger { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c)); - background-image: -webkit-linear-gradient(top, #d9534f, 0%, #c9302c, 100%); - background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%); - background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); -} - -.list-group { - border-radius: 4px; - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); -} - -.list-group-item.active, -.list-group-item.active:hover, -.list-group-item.active:focus { - text-shadow: 0 -1px 0 #3071a9; - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3278b3)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #3278b3, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #3278b3 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%); - background-repeat: repeat-x; - border-color: #3278b3; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0); -} - -.panel { - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); -} - -.panel-default > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f5f5f5), to(#e8e8e8)); - background-image: -webkit-linear-gradient(top, #f5f5f5, 0%, #e8e8e8, 100%); - background-image: -moz-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); - background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); - 
background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); -} - -.panel-primary > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd)); - background-image: -webkit-linear-gradient(top, #428bca, 0%, #357ebd, 100%); - background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%); - background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0); -} - -.panel-success > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#d0e9c6)); - background-image: -webkit-linear-gradient(top, #dff0d8, 0%, #d0e9c6, 100%); - background-image: -moz-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%); - background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0); -} - -.panel-info > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#c4e3f3)); - background-image: -webkit-linear-gradient(top, #d9edf7, 0%, #c4e3f3, 100%); - background-image: -moz-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%); - background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0); -} - -.panel-warning > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#faf2cc)); - background-image: -webkit-linear-gradient(top, #fcf8e3, 0%, #faf2cc, 100%); - background-image: -moz-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%); - background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0); -} - -.panel-danger > .panel-heading { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#ebcccc)); - background-image: -webkit-linear-gradient(top, #f2dede, 0%, #ebcccc, 100%); - background-image: -moz-linear-gradient(top, #f2dede 0%, #ebcccc 100%); - background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0); -} - -.well { - background-image: -webkit-gradient(linear, left 0%, left 100%, from(#e8e8e8), to(#f5f5f5)); - background-image: -webkit-linear-gradient(top, #e8e8e8, 0%, #f5f5f5, 100%); - background-image: -moz-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%); - background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%); - background-repeat: repeat-x; - border-color: #dcdcdc; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0); - -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1); - box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1); -} \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css 
b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css old mode 100755 new mode 100644 index c31428b07eac..5e39401957d8 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css +++ b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap-theme.min.css @@ -1,10 +1,6 @@ /*! - * Bootstrap v3.0.0 - * - * Copyright 2013 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world by @mdo and @fat. - */ -.btn-default,.btn-primary,.btn-success,.btn-info,.btn-warning,.btn-danger{text-shadow:0 -1px 0 rgba(0,0,0,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075)}.btn-default:active,.btn-primary:active,.btn-success:active,.btn-info:active,.btn-warning:active,.btn-danger:active,.btn-default.active,.btn-primary.active,.btn-success.active,.btn-info.active,.btn-warning.active,.btn-danger.active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn:active,.btn.active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,0%,#e6e6e6,100%);background-image:-moz-linear-gradient(top,#fff 0,#e6e6e6 100%);background-image:linear-gradient(to bottom,#fff 0,#e6e6e6 100%);background-repeat:repeat-x;border-color:#e0e0e0;border-color:#ccc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0)}.btn-default:active,.btn-default.active{background-color:#e6e6e6;border-color:#e0e0e0}.btn-primary{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;border-color:#2d6ca2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.btn-primary:active,.btn-primary.active{background-color:#3071a9;border-color:#2d6ca2}.btn-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;border-color:#419641;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.btn-success:active,.btn-success.active{background-color:#449d44;border-color:#419641}.btn-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;border-color:#eb9316;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.btn-warning:active,.btn-warning.active{background-color:#ec971f;border-color:#eb9316}.btn-danger{background-image:-webkit-gradient(linear,left 0,left 
100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;border-color:#c12e2a;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.btn-danger:active,.btn-danger.active{background-color:#c9302c;border-color:#c12e2a}.btn-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;border-color:#2aabd2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.btn-info:active,.btn-info.active{background-color:#31b0d5;border-color:#2aabd2}.thumbnail,.img-thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus,.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{background-color:#357ebd;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.navbar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#f8f8f8));background-image:-webkit-linear-gradient(top,#fff,0%,#f8f8f8,100%);background-image:-moz-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);background-repeat:repeat-x;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff8f8f8',GradientType=0);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075)}.navbar .navbar-nav>.active>a{background-color:#f8f8f8}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,0.25)}.navbar-inverse{background-image:-webkit-gradient(linear,left 0,left 100%,from(#3c3c3c),to(#222));background-image:-webkit-linear-gradient(top,#3c3c3c,0%,#222,100%);background-image:-moz-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c',endColorstr='#ff222222',GradientType=0)}.navbar-inverse .navbar-nav>.active>a{background-color:#222}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.navbar-static-top,.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}.alert{text-shadow:0 1px 0 rgba(255,255,255,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05)}.alert-success{background-image:-webkit-gradient(linear,left 0,left 
100%,from(#dff0d8),to(#c8e5bc));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#c8e5bc,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);background-repeat:repeat-x;border-color:#b2dba1;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffc8e5bc',GradientType=0)}.alert-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#b9def0));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#b9def0,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);background-repeat:repeat-x;border-color:#9acfea;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffb9def0',GradientType=0)}.alert-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#f8efc0));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#f8efc0,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);background-repeat:repeat-x;border-color:#f5e79e;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fff8efc0',GradientType=0)}.alert-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#e7c3c3));background-image:-webkit-linear-gradient(top,#f2dede,0%,#e7c3c3,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);background-repeat:repeat-x;border-color:#dca7a7;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffe7c3c3',GradientType=0)}.progress{background-image:-webkit-gradient(linear,left 0,left 100%,from(#ebebeb),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#ebebeb,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb',endColorstr='#fff5f5f5',GradientType=0)}.progress-bar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.progress-bar-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.progress-bar-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.progress-bar-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.progress-bar-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{text-shadow:0 -1px 0 #3071a9;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3278b3));background-image:-webkit-linear-gradient(top,#428bca,0%,#3278b3,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3278b3 100%);background-image:linear-gradient(to bottom,#428bca 0,#3278b3 100%);background-repeat:repeat-x;border-color:#3278b3;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3278b3',GradientType=0)}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.panel-default>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f5f5f5),to(#e8e8e8));background-image:-webkit-linear-gradient(top,#f5f5f5,0%,#e8e8e8,100%);background-image:-moz-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#ffe8e8e8',GradientType=0)}.panel-primary>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.panel-success>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#dff0d8),to(#d0e9c6));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#d0e9c6,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffd0e9c6',GradientType=0)}.panel-info>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#c4e3f3));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#c4e3f3,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffc4e3f3',GradientType=0)}.panel-warning>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#faf2cc));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#faf2cc,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fffaf2cc',GradientType=0)}.panel-danger>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#ebcccc));background-image:-webkit-linear-gradient(top,#f2dede,0%,#ebcccc,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffebcccc',GradientType=0)}.well{background-image:-webkit-gradient(linear,left 0,left 100%,from(#e8e8e8),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#e8e8e8,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);background-repeat:repeat-x;border-color:#dcdcdc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,0.05),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 3px rgba(0,0,0,0.05),0 1px 0 rgba(255,255,255,0.1)} \ No newline at end of file + * Bootstrap v3.3.7 (http://getbootstrap.com) + * Copyright 2011-2016 Twitter, Inc. 
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-color:#e8e8e8;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-color:#2e6da4;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)} +/*# sourceMappingURL=bootstrap-theme.min.css.map */ \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css deleted file mode 100755 index bbda4eed4afd..000000000000 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.css +++ /dev/null @@ -1,6805 +0,0 @@ -/*! - * Bootstrap v3.0.0 - * - * Copyright 2013 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world by @mdo and @fat. - */ - -/*! 
-/*! normalize.css v2.1.0 | MIT License | git.io/normalize */
[... remainder of the 6805 deleted lines of the vendored Bootstrap v3.0.0 stylesheet elided: normalize.css reset, print styles, scaffolding, grid columns, tables, forms, buttons, Glyphicons Halflings icon classes, dropdowns, button groups, and input groups ...]
-.nav { - padding-left: 0; - margin-bottom: 0; - list-style: none; -} - -.nav:before, -.nav:after { - display: table; - content: " 
"; -} - -.nav:after { - clear: both; -} - -.nav:before, -.nav:after { - display: table; - content: " "; -} - -.nav:after { - clear: both; -} - -.nav > li { - position: relative; - display: block; -} - -.nav > li > a { - position: relative; - display: block; - padding: 10px 15px; -} - -.nav > li > a:hover, -.nav > li > a:focus { - text-decoration: none; - background-color: #eeeeee; -} - -.nav > li.disabled > a { - color: #999999; -} - -.nav > li.disabled > a:hover, -.nav > li.disabled > a:focus { - color: #999999; - text-decoration: none; - cursor: not-allowed; - background-color: transparent; -} - -.nav .open > a, -.nav .open > a:hover, -.nav .open > a:focus { - background-color: #eeeeee; - border-color: #428bca; -} - -.nav .nav-divider { - height: 1px; - margin: 9px 0; - overflow: hidden; - background-color: #e5e5e5; -} - -.nav > li > a > img { - max-width: none; -} - -.nav-tabs { - border-bottom: 1px solid #dddddd; -} - -.nav-tabs > li { - float: left; - margin-bottom: -1px; -} - -.nav-tabs > li > a { - margin-right: 2px; - line-height: 1.428571429; - border: 1px solid transparent; - border-radius: 4px 4px 0 0; -} - -.nav-tabs > li > a:hover { - border-color: #eeeeee #eeeeee #dddddd; -} - -.nav-tabs > li.active > a, -.nav-tabs > li.active > a:hover, -.nav-tabs > li.active > a:focus { - color: #555555; - cursor: default; - background-color: #ffffff; - border: 1px solid #dddddd; - border-bottom-color: transparent; -} - -.nav-tabs.nav-justified { - width: 100%; - border-bottom: 0; -} - -.nav-tabs.nav-justified > li { - float: none; -} - -.nav-tabs.nav-justified > li > a { - text-align: center; -} - -@media (min-width: 768px) { - .nav-tabs.nav-justified > li { - display: table-cell; - width: 1%; - } -} - -.nav-tabs.nav-justified > li > a { - margin-right: 0; - border-bottom: 1px solid #dddddd; -} - -.nav-tabs.nav-justified > .active > a { - border-bottom-color: #ffffff; -} - -.nav-pills > li { - float: left; -} - -.nav-pills > li > a { - border-radius: 5px; -} - -.nav-pills > li + li { - margin-left: 2px; -} - -.nav-pills > li.active > a, -.nav-pills > li.active > a:hover, -.nav-pills > li.active > a:focus { - color: #ffffff; - background-color: #428bca; -} - -.nav-stacked > li { - float: none; -} - -.nav-stacked > li + li { - margin-top: 2px; - margin-left: 0; -} - -.nav-justified { - width: 100%; -} - -.nav-justified > li { - float: none; -} - -.nav-justified > li > a { - text-align: center; -} - -@media (min-width: 768px) { - .nav-justified > li { - display: table-cell; - width: 1%; - } -} - -.nav-tabs-justified { - border-bottom: 0; -} - -.nav-tabs-justified > li > a { - margin-right: 0; - border-bottom: 1px solid #dddddd; -} - -.nav-tabs-justified > .active > a { - border-bottom-color: #ffffff; -} - -.tabbable:before, -.tabbable:after { - display: table; - content: " "; -} - -.tabbable:after { - clear: both; -} - -.tabbable:before, -.tabbable:after { - display: table; - content: " "; -} - -.tabbable:after { - clear: both; -} - -.tab-content > .tab-pane, -.pill-content > .pill-pane { - display: none; -} - -.tab-content > .active, -.pill-content > .active { - display: block; -} - -.nav .caret { - border-top-color: #428bca; - border-bottom-color: #428bca; -} - -.nav a:hover .caret { - border-top-color: #2a6496; - border-bottom-color: #2a6496; -} - -.nav-tabs .dropdown-menu { - margin-top: -1px; - border-top-right-radius: 0; - border-top-left-radius: 0; -} - -.navbar { - position: relative; - z-index: 1000; - min-height: 50px; - margin-bottom: 20px; - border: 1px solid transparent; -} - 
-.navbar:before, -.navbar:after { - display: table; - content: " "; -} - -.navbar:after { - clear: both; -} - -.navbar:before, -.navbar:after { - display: table; - content: " "; -} - -.navbar:after { - clear: both; -} - -@media (min-width: 768px) { - .navbar { - border-radius: 4px; - } -} - -.navbar-header:before, -.navbar-header:after { - display: table; - content: " "; -} - -.navbar-header:after { - clear: both; -} - -.navbar-header:before, -.navbar-header:after { - display: table; - content: " "; -} - -.navbar-header:after { - clear: both; -} - -@media (min-width: 768px) { - .navbar-header { - float: left; - } -} - -.navbar-collapse { - max-height: 340px; - padding-right: 15px; - padding-left: 15px; - overflow-x: visible; - border-top: 1px solid transparent; - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1); - -webkit-overflow-scrolling: touch; -} - -.navbar-collapse:before, -.navbar-collapse:after { - display: table; - content: " "; -} - -.navbar-collapse:after { - clear: both; -} - -.navbar-collapse:before, -.navbar-collapse:after { - display: table; - content: " "; -} - -.navbar-collapse:after { - clear: both; -} - -.navbar-collapse.in { - overflow-y: auto; -} - -@media (min-width: 768px) { - .navbar-collapse { - width: auto; - border-top: 0; - box-shadow: none; - } - .navbar-collapse.collapse { - display: block !important; - height: auto !important; - padding-bottom: 0; - overflow: visible !important; - } - .navbar-collapse.in { - overflow-y: visible; - } - .navbar-collapse .navbar-nav.navbar-left:first-child { - margin-left: -15px; - } - .navbar-collapse .navbar-nav.navbar-right:last-child { - margin-right: -15px; - } - .navbar-collapse .navbar-text:last-child { - margin-right: 0; - } -} - -.container > .navbar-header, -.container > .navbar-collapse { - margin-right: -15px; - margin-left: -15px; -} - -@media (min-width: 768px) { - .container > .navbar-header, - .container > .navbar-collapse { - margin-right: 0; - margin-left: 0; - } -} - -.navbar-static-top { - border-width: 0 0 1px; -} - -@media (min-width: 768px) { - .navbar-static-top { - border-radius: 0; - } -} - -.navbar-fixed-top, -.navbar-fixed-bottom { - position: fixed; - right: 0; - left: 0; - border-width: 0 0 1px; -} - -@media (min-width: 768px) { - .navbar-fixed-top, - .navbar-fixed-bottom { - border-radius: 0; - } -} - -.navbar-fixed-top { - top: 0; - z-index: 1030; -} - -.navbar-fixed-bottom { - bottom: 0; - margin-bottom: 0; -} - -.navbar-brand { - float: left; - padding: 15px 15px; - font-size: 18px; - line-height: 20px; -} - -.navbar-brand:hover, -.navbar-brand:focus { - text-decoration: none; -} - -@media (min-width: 768px) { - .navbar > .container .navbar-brand { - margin-left: -15px; - } -} - -.navbar-toggle { - position: relative; - float: right; - padding: 9px 10px; - margin-top: 8px; - margin-right: 15px; - margin-bottom: 8px; - background-color: transparent; - border: 1px solid transparent; - border-radius: 4px; -} - -.navbar-toggle .icon-bar { - display: block; - width: 22px; - height: 2px; - border-radius: 1px; -} - -.navbar-toggle .icon-bar + .icon-bar { - margin-top: 4px; -} - -@media (min-width: 768px) { - .navbar-toggle { - display: none; - } -} - -.navbar-nav { - margin: 7.5px -15px; -} - -.navbar-nav > li > a { - padding-top: 10px; - padding-bottom: 10px; - line-height: 20px; -} - -@media (max-width: 767px) { - .navbar-nav .open .dropdown-menu { - position: static; - float: none; - width: auto; - margin-top: 0; - background-color: transparent; - border: 0; - box-shadow: none; - } - .navbar-nav 
.open .dropdown-menu > li > a, - .navbar-nav .open .dropdown-menu .dropdown-header { - padding: 5px 15px 5px 25px; - } - .navbar-nav .open .dropdown-menu > li > a { - line-height: 20px; - } - .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-nav .open .dropdown-menu > li > a:focus { - background-image: none; - } -} - -@media (min-width: 768px) { - .navbar-nav { - float: left; - margin: 0; - } - .navbar-nav > li { - float: left; - } - .navbar-nav > li > a { - padding-top: 15px; - padding-bottom: 15px; - } -} - -@media (min-width: 768px) { - .navbar-left { - float: left !important; - } - .navbar-right { - float: right !important; - } -} - -.navbar-form { - padding: 10px 15px; - margin-top: 8px; - margin-right: -15px; - margin-bottom: 8px; - margin-left: -15px; - border-top: 1px solid transparent; - border-bottom: 1px solid transparent; - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); -} - -@media (min-width: 768px) { - .navbar-form .form-group { - display: inline-block; - margin-bottom: 0; - vertical-align: middle; - } - .navbar-form .form-control { - display: inline-block; - } - .navbar-form .radio, - .navbar-form .checkbox { - display: inline-block; - padding-left: 0; - margin-top: 0; - margin-bottom: 0; - } - .navbar-form .radio input[type="radio"], - .navbar-form .checkbox input[type="checkbox"] { - float: none; - margin-left: 0; - } -} - -@media (max-width: 767px) { - .navbar-form .form-group { - margin-bottom: 5px; - } -} - -@media (min-width: 768px) { - .navbar-form { - width: auto; - padding-top: 0; - padding-bottom: 0; - margin-right: 0; - margin-left: 0; - border: 0; - -webkit-box-shadow: none; - box-shadow: none; - } -} - -.navbar-nav > li > .dropdown-menu { - margin-top: 0; - border-top-right-radius: 0; - border-top-left-radius: 0; -} - -.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu { - border-bottom-right-radius: 0; - border-bottom-left-radius: 0; -} - -.navbar-nav.pull-right > li > .dropdown-menu, -.navbar-nav > li > .dropdown-menu.pull-right { - right: 0; - left: auto; -} - -.navbar-btn { - margin-top: 8px; - margin-bottom: 8px; -} - -.navbar-text { - float: left; - margin-top: 15px; - margin-bottom: 15px; -} - -@media (min-width: 768px) { - .navbar-text { - margin-right: 15px; - margin-left: 15px; - } -} - -.navbar-default { - background-color: #f8f8f8; - border-color: #e7e7e7; -} - -.navbar-default .navbar-brand { - color: #777777; -} - -.navbar-default .navbar-brand:hover, -.navbar-default .navbar-brand:focus { - color: #5e5e5e; - background-color: transparent; -} - -.navbar-default .navbar-text { - color: #777777; -} - -.navbar-default .navbar-nav > li > a { - color: #777777; -} - -.navbar-default .navbar-nav > li > a:hover, -.navbar-default .navbar-nav > li > a:focus { - color: #333333; - background-color: transparent; -} - -.navbar-default .navbar-nav > .active > a, -.navbar-default .navbar-nav > .active > a:hover, -.navbar-default .navbar-nav > .active > a:focus { - color: #555555; - background-color: #e7e7e7; -} - -.navbar-default .navbar-nav > .disabled > a, -.navbar-default .navbar-nav > .disabled > a:hover, -.navbar-default .navbar-nav > .disabled > a:focus { - color: #cccccc; - background-color: transparent; -} - -.navbar-default .navbar-toggle { - border-color: #dddddd; -} - -.navbar-default .navbar-toggle:hover, -.navbar-default .navbar-toggle:focus { - background-color: #dddddd; -} - -.navbar-default 
.navbar-toggle .icon-bar { - background-color: #cccccc; -} - -.navbar-default .navbar-collapse, -.navbar-default .navbar-form { - border-color: #e6e6e6; -} - -.navbar-default .navbar-nav > .dropdown > a:hover .caret, -.navbar-default .navbar-nav > .dropdown > a:focus .caret { - border-top-color: #333333; - border-bottom-color: #333333; -} - -.navbar-default .navbar-nav > .open > a, -.navbar-default .navbar-nav > .open > a:hover, -.navbar-default .navbar-nav > .open > a:focus { - color: #555555; - background-color: #e7e7e7; -} - -.navbar-default .navbar-nav > .open > a .caret, -.navbar-default .navbar-nav > .open > a:hover .caret, -.navbar-default .navbar-nav > .open > a:focus .caret { - border-top-color: #555555; - border-bottom-color: #555555; -} - -.navbar-default .navbar-nav > .dropdown > a .caret { - border-top-color: #777777; - border-bottom-color: #777777; -} - -@media (max-width: 767px) { - .navbar-default .navbar-nav .open .dropdown-menu > li > a { - color: #777777; - } - .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus { - color: #333333; - background-color: transparent; - } - .navbar-default .navbar-nav .open .dropdown-menu > .active > a, - .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus { - color: #555555; - background-color: #e7e7e7; - } - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a, - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus { - color: #cccccc; - background-color: transparent; - } -} - -.navbar-default .navbar-link { - color: #777777; -} - -.navbar-default .navbar-link:hover { - color: #333333; -} - -.navbar-inverse { - background-color: #222222; - border-color: #080808; -} - -.navbar-inverse .navbar-brand { - color: #999999; -} - -.navbar-inverse .navbar-brand:hover, -.navbar-inverse .navbar-brand:focus { - color: #ffffff; - background-color: transparent; -} - -.navbar-inverse .navbar-text { - color: #999999; -} - -.navbar-inverse .navbar-nav > li > a { - color: #999999; -} - -.navbar-inverse .navbar-nav > li > a:hover, -.navbar-inverse .navbar-nav > li > a:focus { - color: #ffffff; - background-color: transparent; -} - -.navbar-inverse .navbar-nav > .active > a, -.navbar-inverse .navbar-nav > .active > a:hover, -.navbar-inverse .navbar-nav > .active > a:focus { - color: #ffffff; - background-color: #080808; -} - -.navbar-inverse .navbar-nav > .disabled > a, -.navbar-inverse .navbar-nav > .disabled > a:hover, -.navbar-inverse .navbar-nav > .disabled > a:focus { - color: #444444; - background-color: transparent; -} - -.navbar-inverse .navbar-toggle { - border-color: #333333; -} - -.navbar-inverse .navbar-toggle:hover, -.navbar-inverse .navbar-toggle:focus { - background-color: #333333; -} - -.navbar-inverse .navbar-toggle .icon-bar { - background-color: #ffffff; -} - -.navbar-inverse .navbar-collapse, -.navbar-inverse .navbar-form { - border-color: #101010; -} - -.navbar-inverse .navbar-nav > .open > a, -.navbar-inverse .navbar-nav > .open > a:hover, -.navbar-inverse .navbar-nav > .open > a:focus { - color: #ffffff; - background-color: #080808; -} - -.navbar-inverse .navbar-nav > .dropdown > a:hover .caret { - border-top-color: #ffffff; - border-bottom-color: #ffffff; -} - -.navbar-inverse .navbar-nav > .dropdown > a .caret { - border-top-color: #999999; - border-bottom-color: 
#999999; -} - -.navbar-inverse .navbar-nav > .open > a .caret, -.navbar-inverse .navbar-nav > .open > a:hover .caret, -.navbar-inverse .navbar-nav > .open > a:focus .caret { - border-top-color: #ffffff; - border-bottom-color: #ffffff; -} - -@media (max-width: 767px) { - .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header { - border-color: #080808; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a { - color: #999999; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus { - color: #ffffff; - background-color: transparent; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a, - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus { - color: #ffffff; - background-color: #080808; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a, - .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus { - color: #444444; - background-color: transparent; - } -} - -.navbar-inverse .navbar-link { - color: #999999; -} - -.navbar-inverse .navbar-link:hover { - color: #ffffff; -} - -.breadcrumb { - padding: 8px 15px; - margin-bottom: 20px; - list-style: none; - background-color: #f5f5f5; - border-radius: 4px; -} - -.breadcrumb > li { - display: inline-block; -} - -.breadcrumb > li + li:before { - padding: 0 5px; - color: #cccccc; - content: "/\00a0"; -} - -.breadcrumb > .active { - color: #999999; -} - -.pagination { - display: inline-block; - padding-left: 0; - margin: 20px 0; - border-radius: 4px; -} - -.pagination > li { - display: inline; -} - -.pagination > li > a, -.pagination > li > span { - position: relative; - float: left; - padding: 6px 12px; - margin-left: -1px; - line-height: 1.428571429; - text-decoration: none; - background-color: #ffffff; - border: 1px solid #dddddd; -} - -.pagination > li:first-child > a, -.pagination > li:first-child > span { - margin-left: 0; - border-bottom-left-radius: 4px; - border-top-left-radius: 4px; -} - -.pagination > li:last-child > a, -.pagination > li:last-child > span { - border-top-right-radius: 4px; - border-bottom-right-radius: 4px; -} - -.pagination > li > a:hover, -.pagination > li > span:hover, -.pagination > li > a:focus, -.pagination > li > span:focus { - background-color: #eeeeee; -} - -.pagination > .active > a, -.pagination > .active > span, -.pagination > .active > a:hover, -.pagination > .active > span:hover, -.pagination > .active > a:focus, -.pagination > .active > span:focus { - z-index: 2; - color: #ffffff; - cursor: default; - background-color: #428bca; - border-color: #428bca; -} - -.pagination > .disabled > span, -.pagination > .disabled > a, -.pagination > .disabled > a:hover, -.pagination > .disabled > a:focus { - color: #999999; - cursor: not-allowed; - background-color: #ffffff; - border-color: #dddddd; -} - -.pagination-lg > li > a, -.pagination-lg > li > span { - padding: 10px 16px; - font-size: 18px; -} - -.pagination-lg > li:first-child > a, -.pagination-lg > li:first-child > span { - border-bottom-left-radius: 6px; - border-top-left-radius: 6px; -} - -.pagination-lg > li:last-child > a, -.pagination-lg > li:last-child > span { - border-top-right-radius: 6px; - border-bottom-right-radius: 6px; -} - -.pagination-sm > li > a, -.pagination-sm > li > span { - padding: 5px 10px; - font-size: 12px; -} - -.pagination-sm > 
li:first-child > a, -.pagination-sm > li:first-child > span { - border-bottom-left-radius: 3px; - border-top-left-radius: 3px; -} - -.pagination-sm > li:last-child > a, -.pagination-sm > li:last-child > span { - border-top-right-radius: 3px; - border-bottom-right-radius: 3px; -} - -.pager { - padding-left: 0; - margin: 20px 0; - text-align: center; - list-style: none; -} - -.pager:before, -.pager:after { - display: table; - content: " "; -} - -.pager:after { - clear: both; -} - -.pager:before, -.pager:after { - display: table; - content: " "; -} - -.pager:after { - clear: both; -} - -.pager li { - display: inline; -} - -.pager li > a, -.pager li > span { - display: inline-block; - padding: 5px 14px; - background-color: #ffffff; - border: 1px solid #dddddd; - border-radius: 15px; -} - -.pager li > a:hover, -.pager li > a:focus { - text-decoration: none; - background-color: #eeeeee; -} - -.pager .next > a, -.pager .next > span { - float: right; -} - -.pager .previous > a, -.pager .previous > span { - float: left; -} - -.pager .disabled > a, -.pager .disabled > a:hover, -.pager .disabled > a:focus, -.pager .disabled > span { - color: #999999; - cursor: not-allowed; - background-color: #ffffff; -} - -.label { - display: inline; - padding: .2em .6em .3em; - font-size: 75%; - font-weight: bold; - line-height: 1; - color: #ffffff; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - border-radius: .25em; -} - -.label[href]:hover, -.label[href]:focus { - color: #ffffff; - text-decoration: none; - cursor: pointer; -} - -.label:empty { - display: none; -} - -.label-default { - background-color: #999999; -} - -.label-default[href]:hover, -.label-default[href]:focus { - background-color: #808080; -} - -.label-primary { - background-color: #428bca; -} - -.label-primary[href]:hover, -.label-primary[href]:focus { - background-color: #3071a9; -} - -.label-success { - background-color: #5cb85c; -} - -.label-success[href]:hover, -.label-success[href]:focus { - background-color: #449d44; -} - -.label-info { - background-color: #5bc0de; -} - -.label-info[href]:hover, -.label-info[href]:focus { - background-color: #31b0d5; -} - -.label-warning { - background-color: #f0ad4e; -} - -.label-warning[href]:hover, -.label-warning[href]:focus { - background-color: #ec971f; -} - -.label-danger { - background-color: #d9534f; -} - -.label-danger[href]:hover, -.label-danger[href]:focus { - background-color: #c9302c; -} - -.badge { - display: inline-block; - min-width: 10px; - padding: 3px 7px; - font-size: 12px; - font-weight: bold; - line-height: 1; - color: #ffffff; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - background-color: #999999; - border-radius: 10px; -} - -.badge:empty { - display: none; -} - -a.badge:hover, -a.badge:focus { - color: #ffffff; - text-decoration: none; - cursor: pointer; -} - -.btn .badge { - position: relative; - top: -1px; -} - -a.list-group-item.active > .badge, -.nav-pills > .active > a > .badge { - color: #428bca; - background-color: #ffffff; -} - -.nav-pills > li > a > .badge { - margin-left: 3px; -} - -.jumbotron { - padding: 30px; - margin-bottom: 30px; - font-size: 21px; - font-weight: 200; - line-height: 2.1428571435; - color: inherit; - background-color: #eeeeee; -} - -.jumbotron h1 { - line-height: 1; - color: inherit; -} - -.jumbotron p { - line-height: 1.4; -} - -.container .jumbotron { - border-radius: 6px; -} - -@media screen and (min-width: 768px) { - .jumbotron { - padding-top: 48px; - padding-bottom: 48px; - } - .container 
.jumbotron { - padding-right: 60px; - padding-left: 60px; - } - .jumbotron h1 { - font-size: 63px; - } -} - -.thumbnail { - display: inline-block; - display: block; - height: auto; - max-width: 100%; - padding: 4px; - line-height: 1.428571429; - background-color: #ffffff; - border: 1px solid #dddddd; - border-radius: 4px; - -webkit-transition: all 0.2s ease-in-out; - transition: all 0.2s ease-in-out; -} - -.thumbnail > img { - display: block; - height: auto; - max-width: 100%; -} - -a.thumbnail:hover, -a.thumbnail:focus { - border-color: #428bca; -} - -.thumbnail > img { - margin-right: auto; - margin-left: auto; -} - -.thumbnail .caption { - padding: 9px; - color: #333333; -} - -.alert { - padding: 15px; - margin-bottom: 20px; - border: 1px solid transparent; - border-radius: 4px; -} - -.alert h4 { - margin-top: 0; - color: inherit; -} - -.alert .alert-link { - font-weight: bold; -} - -.alert > p, -.alert > ul { - margin-bottom: 0; -} - -.alert > p + p { - margin-top: 5px; -} - -.alert-dismissable { - padding-right: 35px; -} - -.alert-dismissable .close { - position: relative; - top: -2px; - right: -21px; - color: inherit; -} - -.alert-success { - color: #468847; - background-color: #dff0d8; - border-color: #d6e9c6; -} - -.alert-success hr { - border-top-color: #c9e2b3; -} - -.alert-success .alert-link { - color: #356635; -} - -.alert-info { - color: #3a87ad; - background-color: #d9edf7; - border-color: #bce8f1; -} - -.alert-info hr { - border-top-color: #a6e1ec; -} - -.alert-info .alert-link { - color: #2d6987; -} - -.alert-warning { - color: #c09853; - background-color: #fcf8e3; - border-color: #fbeed5; -} - -.alert-warning hr { - border-top-color: #f8e5be; -} - -.alert-warning .alert-link { - color: #a47e3c; -} - -.alert-danger { - color: #b94a48; - background-color: #f2dede; - border-color: #eed3d7; -} - -.alert-danger hr { - border-top-color: #e6c1c7; -} - -.alert-danger .alert-link { - color: #953b39; -} - -@-webkit-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-moz-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-o-keyframes progress-bar-stripes { - from { - background-position: 0 0; - } - to { - background-position: 40px 0; - } -} - -@keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -.progress { - height: 20px; - margin-bottom: 20px; - overflow: hidden; - background-color: #f5f5f5; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); - box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); -} - -.progress-bar { - float: left; - width: 0; - height: 100%; - font-size: 12px; - color: #ffffff; - text-align: center; - background-color: #428bca; - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -webkit-transition: width 0.6s ease; - transition: width 0.6s ease; -} - -.progress-striped .progress-bar { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 
75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-size: 40px 40px; -} - -.progress.active .progress-bar { - -webkit-animation: progress-bar-stripes 2s linear infinite; - -moz-animation: progress-bar-stripes 2s linear infinite; - -ms-animation: progress-bar-stripes 2s linear infinite; - -o-animation: progress-bar-stripes 2s linear infinite; - animation: progress-bar-stripes 2s linear infinite; -} - -.progress-bar-success { - background-color: #5cb85c; -} - -.progress-striped .progress-bar-success { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-bar-info { - background-color: #5bc0de; -} - -.progress-striped .progress-bar-info { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-bar-warning { - background-color: #f0ad4e; -} - -.progress-striped .progress-bar-warning { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, 
transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-bar-danger { - background-color: #d9534f; -} - -.progress-striped .progress-bar-danger { - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.media, -.media-body { - overflow: hidden; - zoom: 1; -} - -.media, -.media .media { - margin-top: 15px; -} - -.media:first-child { - margin-top: 0; -} - -.media-object { - display: block; -} - -.media-heading { - margin: 0 0 5px; -} - -.media > .pull-left { - margin-right: 10px; -} - -.media > .pull-right { - margin-left: 10px; -} - -.media-list { - padding-left: 0; - list-style: none; -} - -.list-group { - padding-left: 0; - margin-bottom: 20px; -} - -.list-group-item { - position: relative; - display: block; - padding: 10px 15px; - margin-bottom: -1px; - background-color: #ffffff; - border: 1px solid #dddddd; -} - -.list-group-item:first-child { - border-top-right-radius: 4px; - border-top-left-radius: 4px; -} - -.list-group-item:last-child { - margin-bottom: 0; - border-bottom-right-radius: 4px; - border-bottom-left-radius: 4px; -} - -.list-group-item > .badge { - float: right; -} - -.list-group-item > .badge + .badge { - margin-right: 5px; -} - -a.list-group-item { - color: #555555; -} - -a.list-group-item .list-group-item-heading { - color: #333333; -} - -a.list-group-item:hover, -a.list-group-item:focus { - text-decoration: none; - background-color: #f5f5f5; -} - -.list-group-item.active, -.list-group-item.active:hover, -.list-group-item.active:focus { - z-index: 2; - color: #ffffff; - background-color: #428bca; - border-color: #428bca; -} - -.list-group-item.active .list-group-item-heading, -.list-group-item.active:hover .list-group-item-heading, -.list-group-item.active:focus .list-group-item-heading { - color: inherit; -} - -.list-group-item.active .list-group-item-text, -.list-group-item.active:hover .list-group-item-text, -.list-group-item.active:focus .list-group-item-text { - color: #e1edf7; -} - -.list-group-item-heading { - margin-top: 0; - margin-bottom: 5px; -} - -.list-group-item-text { - margin-bottom: 0; - line-height: 1.3; -} - -.panel { - margin-bottom: 20px; - background-color: #ffffff; - border: 1px solid transparent; - border-radius: 4px; - -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05); - box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05); -} - -.panel-body { - padding: 15px; -} - -.panel-body:before, -.panel-body:after { - display: table; - content: " 
"; -} - -.panel-body:after { - clear: both; -} - -.panel-body:before, -.panel-body:after { - display: table; - content: " "; -} - -.panel-body:after { - clear: both; -} - -.panel > .list-group { - margin-bottom: 0; -} - -.panel > .list-group .list-group-item { - border-width: 1px 0; -} - -.panel > .list-group .list-group-item:first-child { - border-top-right-radius: 0; - border-top-left-radius: 0; -} - -.panel > .list-group .list-group-item:last-child { - border-bottom: 0; -} - -.panel-heading + .list-group .list-group-item:first-child { - border-top-width: 0; -} - -.panel > .table { - margin-bottom: 0; -} - -.panel > .panel-body + .table { - border-top: 1px solid #dddddd; -} - -.panel-heading { - padding: 10px 15px; - border-bottom: 1px solid transparent; - border-top-right-radius: 3px; - border-top-left-radius: 3px; -} - -.panel-title { - margin-top: 0; - margin-bottom: 0; - font-size: 16px; -} - -.panel-title > a { - color: inherit; -} - -.panel-footer { - padding: 10px 15px; - background-color: #f5f5f5; - border-top: 1px solid #dddddd; - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} - -.panel-group .panel { - margin-bottom: 0; - overflow: hidden; - border-radius: 4px; -} - -.panel-group .panel + .panel { - margin-top: 5px; -} - -.panel-group .panel-heading { - border-bottom: 0; -} - -.panel-group .panel-heading + .panel-collapse .panel-body { - border-top: 1px solid #dddddd; -} - -.panel-group .panel-footer { - border-top: 0; -} - -.panel-group .panel-footer + .panel-collapse .panel-body { - border-bottom: 1px solid #dddddd; -} - -.panel-default { - border-color: #dddddd; -} - -.panel-default > .panel-heading { - color: #333333; - background-color: #f5f5f5; - border-color: #dddddd; -} - -.panel-default > .panel-heading + .panel-collapse .panel-body { - border-top-color: #dddddd; -} - -.panel-default > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #dddddd; -} - -.panel-primary { - border-color: #428bca; -} - -.panel-primary > .panel-heading { - color: #ffffff; - background-color: #428bca; - border-color: #428bca; -} - -.panel-primary > .panel-heading + .panel-collapse .panel-body { - border-top-color: #428bca; -} - -.panel-primary > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #428bca; -} - -.panel-success { - border-color: #d6e9c6; -} - -.panel-success > .panel-heading { - color: #468847; - background-color: #dff0d8; - border-color: #d6e9c6; -} - -.panel-success > .panel-heading + .panel-collapse .panel-body { - border-top-color: #d6e9c6; -} - -.panel-success > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #d6e9c6; -} - -.panel-warning { - border-color: #fbeed5; -} - -.panel-warning > .panel-heading { - color: #c09853; - background-color: #fcf8e3; - border-color: #fbeed5; -} - -.panel-warning > .panel-heading + .panel-collapse .panel-body { - border-top-color: #fbeed5; -} - -.panel-warning > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #fbeed5; -} - -.panel-danger { - border-color: #eed3d7; -} - -.panel-danger > .panel-heading { - color: #b94a48; - background-color: #f2dede; - border-color: #eed3d7; -} - -.panel-danger > .panel-heading + .panel-collapse .panel-body { - border-top-color: #eed3d7; -} - -.panel-danger > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #eed3d7; -} - -.panel-info { - border-color: #bce8f1; -} - -.panel-info > .panel-heading { - color: #3a87ad; - background-color: #d9edf7; - border-color: #bce8f1; -} - -.panel-info > 
.panel-heading + .panel-collapse .panel-body { - border-top-color: #bce8f1; -} - -.panel-info > .panel-footer + .panel-collapse .panel-body { - border-bottom-color: #bce8f1; -} - -.well { - min-height: 20px; - padding: 19px; - margin-bottom: 20px; - background-color: #f5f5f5; - border: 1px solid #e3e3e3; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05); -} - -.well blockquote { - border-color: #ddd; - border-color: rgba(0, 0, 0, 0.15); -} - -.well-lg { - padding: 24px; - border-radius: 6px; -} - -.well-sm { - padding: 9px; - border-radius: 3px; -} - -.close { - float: right; - font-size: 21px; - font-weight: bold; - line-height: 1; - color: #000000; - text-shadow: 0 1px 0 #ffffff; - opacity: 0.2; - filter: alpha(opacity=20); -} - -.close:hover, -.close:focus { - color: #000000; - text-decoration: none; - cursor: pointer; - opacity: 0.5; - filter: alpha(opacity=50); -} - -button.close { - padding: 0; - cursor: pointer; - background: transparent; - border: 0; - -webkit-appearance: none; -} - -.modal-open { - overflow: hidden; -} - -body.modal-open, -.modal-open .navbar-fixed-top, -.modal-open .navbar-fixed-bottom { - margin-right: 15px; -} - -.modal { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1040; - display: none; - overflow: auto; - overflow-y: scroll; -} - -.modal.fade .modal-dialog { - -webkit-transform: translate(0, -25%); - -ms-transform: translate(0, -25%); - transform: translate(0, -25%); - -webkit-transition: -webkit-transform 0.3s ease-out; - -moz-transition: -moz-transform 0.3s ease-out; - -o-transition: -o-transform 0.3s ease-out; - transition: transform 0.3s ease-out; -} - -.modal.in .modal-dialog { - -webkit-transform: translate(0, 0); - -ms-transform: translate(0, 0); - transform: translate(0, 0); -} - -.modal-dialog { - z-index: 1050; - width: auto; - padding: 10px; - margin-right: auto; - margin-left: auto; -} - -.modal-content { - position: relative; - background-color: #ffffff; - border: 1px solid #999999; - border: 1px solid rgba(0, 0, 0, 0.2); - border-radius: 6px; - outline: none; - -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5); - box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5); - background-clip: padding-box; -} - -.modal-backdrop { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1030; - background-color: #000000; -} - -.modal-backdrop.fade { - opacity: 0; - filter: alpha(opacity=0); -} - -.modal-backdrop.in { - opacity: 0.5; - filter: alpha(opacity=50); -} - -.modal-header { - min-height: 16.428571429px; - padding: 15px; - border-bottom: 1px solid #e5e5e5; -} - -.modal-header .close { - margin-top: -2px; -} - -.modal-title { - margin: 0; - line-height: 1.428571429; -} - -.modal-body { - position: relative; - padding: 20px; -} - -.modal-footer { - padding: 19px 20px 20px; - margin-top: 15px; - text-align: right; - border-top: 1px solid #e5e5e5; -} - -.modal-footer:before, -.modal-footer:after { - display: table; - content: " "; -} - -.modal-footer:after { - clear: both; -} - -.modal-footer:before, -.modal-footer:after { - display: table; - content: " "; -} - -.modal-footer:after { - clear: both; -} - -.modal-footer .btn + .btn { - margin-bottom: 0; - margin-left: 5px; -} - -.modal-footer .btn-group .btn + .btn { - margin-left: -1px; -} - -.modal-footer .btn-block + .btn-block { - margin-left: 0; -} - -@media screen and (min-width: 768px) { - .modal-dialog { - right: auto; - left: 50%; - width: 600px; - padding-top: 30px; - 
padding-bottom: 30px; - } - .modal-content { - -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); - box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); - } -} - -.tooltip { - position: absolute; - z-index: 1030; - display: block; - font-size: 12px; - line-height: 1.4; - opacity: 0; - filter: alpha(opacity=0); - visibility: visible; -} - -.tooltip.in { - opacity: 0.9; - filter: alpha(opacity=90); -} - -.tooltip.top { - padding: 5px 0; - margin-top: -3px; -} - -.tooltip.right { - padding: 0 5px; - margin-left: 3px; -} - -.tooltip.bottom { - padding: 5px 0; - margin-top: 3px; -} - -.tooltip.left { - padding: 0 5px; - margin-left: -3px; -} - -.tooltip-inner { - max-width: 200px; - padding: 3px 8px; - color: #ffffff; - text-align: center; - text-decoration: none; - background-color: #000000; - border-radius: 4px; -} - -.tooltip-arrow { - position: absolute; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} - -.tooltip.top .tooltip-arrow { - bottom: 0; - left: 50%; - margin-left: -5px; - border-top-color: #000000; - border-width: 5px 5px 0; -} - -.tooltip.top-left .tooltip-arrow { - bottom: 0; - left: 5px; - border-top-color: #000000; - border-width: 5px 5px 0; -} - -.tooltip.top-right .tooltip-arrow { - right: 5px; - bottom: 0; - border-top-color: #000000; - border-width: 5px 5px 0; -} - -.tooltip.right .tooltip-arrow { - top: 50%; - left: 0; - margin-top: -5px; - border-right-color: #000000; - border-width: 5px 5px 5px 0; -} - -.tooltip.left .tooltip-arrow { - top: 50%; - right: 0; - margin-top: -5px; - border-left-color: #000000; - border-width: 5px 0 5px 5px; -} - -.tooltip.bottom .tooltip-arrow { - top: 0; - left: 50%; - margin-left: -5px; - border-bottom-color: #000000; - border-width: 0 5px 5px; -} - -.tooltip.bottom-left .tooltip-arrow { - top: 0; - left: 5px; - border-bottom-color: #000000; - border-width: 0 5px 5px; -} - -.tooltip.bottom-right .tooltip-arrow { - top: 0; - right: 5px; - border-bottom-color: #000000; - border-width: 0 5px 5px; -} - -.popover { - position: absolute; - top: 0; - left: 0; - z-index: 1010; - display: none; - max-width: 276px; - padding: 1px; - text-align: left; - white-space: normal; - background-color: #ffffff; - border: 1px solid #cccccc; - border: 1px solid rgba(0, 0, 0, 0.2); - border-radius: 6px; - -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - background-clip: padding-box; -} - -.popover.top { - margin-top: -10px; -} - -.popover.right { - margin-left: 10px; -} - -.popover.bottom { - margin-top: 10px; -} - -.popover.left { - margin-left: -10px; -} - -.popover-title { - padding: 8px 14px; - margin: 0; - font-size: 14px; - font-weight: normal; - line-height: 18px; - background-color: #f7f7f7; - border-bottom: 1px solid #ebebeb; - border-radius: 5px 5px 0 0; -} - -.popover-content { - padding: 9px 14px; -} - -.popover .arrow, -.popover .arrow:after { - position: absolute; - display: block; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} - -.popover .arrow { - border-width: 11px; -} - -.popover .arrow:after { - border-width: 10px; - content: ""; -} - -.popover.top .arrow { - bottom: -11px; - left: 50%; - margin-left: -11px; - border-top-color: #999999; - border-top-color: rgba(0, 0, 0, 0.25); - border-bottom-width: 0; -} - -.popover.top .arrow:after { - bottom: 1px; - margin-left: -10px; - border-top-color: #ffffff; - border-bottom-width: 0; - content: " "; -} - -.popover.right .arrow { - top: 50%; - left: -11px; - margin-top: -11px; - border-right-color: 
#999999; - border-right-color: rgba(0, 0, 0, 0.25); - border-left-width: 0; -} - -.popover.right .arrow:after { - bottom: -10px; - left: 1px; - border-right-color: #ffffff; - border-left-width: 0; - content: " "; -} - -.popover.bottom .arrow { - top: -11px; - left: 50%; - margin-left: -11px; - border-bottom-color: #999999; - border-bottom-color: rgba(0, 0, 0, 0.25); - border-top-width: 0; -} - -.popover.bottom .arrow:after { - top: 1px; - margin-left: -10px; - border-bottom-color: #ffffff; - border-top-width: 0; - content: " "; -} - -.popover.left .arrow { - top: 50%; - right: -11px; - margin-top: -11px; - border-left-color: #999999; - border-left-color: rgba(0, 0, 0, 0.25); - border-right-width: 0; -} - -.popover.left .arrow:after { - right: 1px; - bottom: -10px; - border-left-color: #ffffff; - border-right-width: 0; - content: " "; -} - -.carousel { - position: relative; -} - -.carousel-inner { - position: relative; - width: 100%; - overflow: hidden; -} - -.carousel-inner > .item { - position: relative; - display: none; - -webkit-transition: 0.6s ease-in-out left; - transition: 0.6s ease-in-out left; -} - -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - display: block; - height: auto; - max-width: 100%; - line-height: 1; -} - -.carousel-inner > .active, -.carousel-inner > .next, -.carousel-inner > .prev { - display: block; -} - -.carousel-inner > .active { - left: 0; -} - -.carousel-inner > .next, -.carousel-inner > .prev { - position: absolute; - top: 0; - width: 100%; -} - -.carousel-inner > .next { - left: 100%; -} - -.carousel-inner > .prev { - left: -100%; -} - -.carousel-inner > .next.left, -.carousel-inner > .prev.right { - left: 0; -} - -.carousel-inner > .active.left { - left: -100%; -} - -.carousel-inner > .active.right { - left: 100%; -} - -.carousel-control { - position: absolute; - top: 0; - bottom: 0; - left: 0; - width: 15%; - font-size: 20px; - color: #ffffff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); - opacity: 0.5; - filter: alpha(opacity=50); -} - -.carousel-control.left { - background-image: -webkit-gradient(linear, 0 top, 100% top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001))); - background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, 0.5) 0), color-stop(rgba(0, 0, 0, 0.0001) 100%)); - background-image: -moz-linear-gradient(left, rgba(0, 0, 0, 0.5) 0, rgba(0, 0, 0, 0.0001) 100%); - background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0, rgba(0, 0, 0, 0.0001) 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); -} - -.carousel-control.right { - right: 0; - left: auto; - background-image: -webkit-gradient(linear, 0 top, 100% top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5))); - background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, 0.0001) 0), color-stop(rgba(0, 0, 0, 0.5) 100%)); - background-image: -moz-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0, rgba(0, 0, 0, 0.5) 100%); - background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0, rgba(0, 0, 0, 0.5) 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); -} - -.carousel-control:hover, -.carousel-control:focus { - color: #ffffff; - text-decoration: none; - opacity: 0.9; - filter: alpha(opacity=90); -} - -.carousel-control .icon-prev, -.carousel-control .icon-next, -.carousel-control 
.glyphicon-chevron-left, -.carousel-control .glyphicon-chevron-right { - position: absolute; - top: 50%; - left: 50%; - z-index: 5; - display: inline-block; -} - -.carousel-control .icon-prev, -.carousel-control .icon-next { - width: 20px; - height: 20px; - margin-top: -10px; - margin-left: -10px; - font-family: serif; -} - -.carousel-control .icon-prev:before { - content: '\2039'; -} - -.carousel-control .icon-next:before { - content: '\203a'; -} - -.carousel-indicators { - position: absolute; - bottom: 10px; - left: 50%; - z-index: 15; - width: 60%; - padding-left: 0; - margin-left: -30%; - text-align: center; - list-style: none; -} - -.carousel-indicators li { - display: inline-block; - width: 10px; - height: 10px; - margin: 1px; - text-indent: -999px; - cursor: pointer; - border: 1px solid #ffffff; - border-radius: 10px; -} - -.carousel-indicators .active { - width: 12px; - height: 12px; - margin: 0; - background-color: #ffffff; -} - -.carousel-caption { - position: absolute; - right: 15%; - bottom: 20px; - left: 15%; - z-index: 10; - padding-top: 20px; - padding-bottom: 20px; - color: #ffffff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); -} - -.carousel-caption .btn { - text-shadow: none; -} - -@media screen and (min-width: 768px) { - .carousel-control .icon-prev, - .carousel-control .icon-next { - width: 30px; - height: 30px; - margin-top: -15px; - margin-left: -15px; - font-size: 30px; - } - .carousel-caption { - right: 20%; - left: 20%; - padding-bottom: 30px; - } - .carousel-indicators { - bottom: 20px; - } -} - -.clearfix:before, -.clearfix:after { - display: table; - content: " "; -} - -.clearfix:after { - clear: both; -} - -.pull-right { - float: right !important; -} - -.pull-left { - float: left !important; -} - -.hide { - display: none !important; -} - -.show { - display: block !important; -} - -.invisible { - visibility: hidden; -} - -.text-hide { - font: 0/0 a; - color: transparent; - text-shadow: none; - background-color: transparent; - border: 0; -} - -.affix { - position: fixed; -} - -@-ms-viewport { - width: device-width; -} - -@media screen and (max-width: 400px) { - @-ms-viewport { - width: 320px; - } -} - -.hidden { - display: none !important; - visibility: hidden !important; -} - -.visible-xs { - display: none !important; -} - -tr.visible-xs { - display: none !important; -} - -th.visible-xs, -td.visible-xs { - display: none !important; -} - -@media (max-width: 767px) { - .visible-xs { - display: block !important; - } - tr.visible-xs { - display: table-row !important; - } - th.visible-xs, - td.visible-xs { - display: table-cell !important; - } -} - -@media (min-width: 768px) and (max-width: 991px) { - .visible-xs.visible-sm { - display: block !important; - } - tr.visible-xs.visible-sm { - display: table-row !important; - } - th.visible-xs.visible-sm, - td.visible-xs.visible-sm { - display: table-cell !important; - } -} - -@media (min-width: 992px) and (max-width: 1199px) { - .visible-xs.visible-md { - display: block !important; - } - tr.visible-xs.visible-md { - display: table-row !important; - } - th.visible-xs.visible-md, - td.visible-xs.visible-md { - display: table-cell !important; - } -} - -@media (min-width: 1200px) { - .visible-xs.visible-lg { - display: block !important; - } - tr.visible-xs.visible-lg { - display: table-row !important; - } - th.visible-xs.visible-lg, - td.visible-xs.visible-lg { - display: table-cell !important; - } -} - -.visible-sm { - display: none !important; -} - -tr.visible-sm { - display: none !important; -} - 
-th.visible-sm,
-td.visible-sm {
-  display: none !important;
-}
-
-@media (max-width: 767px) {
-  .visible-sm.visible-xs {
-    display: block !important;
-  }
-  tr.visible-sm.visible-xs {
-    display: table-row !important;
-  }
-  th.visible-sm.visible-xs,
-  td.visible-sm.visible-xs {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 768px) and (max-width: 991px) {
-  .visible-sm {
-    display: block !important;
-  }
-  tr.visible-sm {
-    display: table-row !important;
-  }
-  th.visible-sm,
-  td.visible-sm {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 992px) and (max-width: 1199px) {
-  .visible-sm.visible-md {
-    display: block !important;
-  }
-  tr.visible-sm.visible-md {
-    display: table-row !important;
-  }
-  th.visible-sm.visible-md,
-  td.visible-sm.visible-md {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 1200px) {
-  .visible-sm.visible-lg {
-    display: block !important;
-  }
-  tr.visible-sm.visible-lg {
-    display: table-row !important;
-  }
-  th.visible-sm.visible-lg,
-  td.visible-sm.visible-lg {
-    display: table-cell !important;
-  }
-}
-
-.visible-md {
-  display: none !important;
-}
-
-tr.visible-md {
-  display: none !important;
-}
-
-th.visible-md,
-td.visible-md {
-  display: none !important;
-}
-
-@media (max-width: 767px) {
-  .visible-md.visible-xs {
-    display: block !important;
-  }
-  tr.visible-md.visible-xs {
-    display: table-row !important;
-  }
-  th.visible-md.visible-xs,
-  td.visible-md.visible-xs {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 768px) and (max-width: 991px) {
-  .visible-md.visible-sm {
-    display: block !important;
-  }
-  tr.visible-md.visible-sm {
-    display: table-row !important;
-  }
-  th.visible-md.visible-sm,
-  td.visible-md.visible-sm {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 992px) and (max-width: 1199px) {
-  .visible-md {
-    display: block !important;
-  }
-  tr.visible-md {
-    display: table-row !important;
-  }
-  th.visible-md,
-  td.visible-md {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 1200px) {
-  .visible-md.visible-lg {
-    display: block !important;
-  }
-  tr.visible-md.visible-lg {
-    display: table-row !important;
-  }
-  th.visible-md.visible-lg,
-  td.visible-md.visible-lg {
-    display: table-cell !important;
-  }
-}
-
-.visible-lg {
-  display: none !important;
-}
-
-tr.visible-lg {
-  display: none !important;
-}
-
-th.visible-lg,
-td.visible-lg {
-  display: none !important;
-}
-
-@media (max-width: 767px) {
-  .visible-lg.visible-xs {
-    display: block !important;
-  }
-  tr.visible-lg.visible-xs {
-    display: table-row !important;
-  }
-  th.visible-lg.visible-xs,
-  td.visible-lg.visible-xs {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 768px) and (max-width: 991px) {
-  .visible-lg.visible-sm {
-    display: block !important;
-  }
-  tr.visible-lg.visible-sm {
-    display: table-row !important;
-  }
-  th.visible-lg.visible-sm,
-  td.visible-lg.visible-sm {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 992px) and (max-width: 1199px) {
-  .visible-lg.visible-md {
-    display: block !important;
-  }
-  tr.visible-lg.visible-md {
-    display: table-row !important;
-  }
-  th.visible-lg.visible-md,
-  td.visible-lg.visible-md {
-    display: table-cell !important;
-  }
-}
-
-@media (min-width: 1200px) {
-  .visible-lg {
-    display: block !important;
-  }
-  tr.visible-lg {
-    display: table-row !important;
-  }
-  th.visible-lg,
-  td.visible-lg {
-    display: table-cell !important;
-  }
-}
-
-.hidden-xs {
-  display: block !important;
-}
-
-tr.hidden-xs {
-  display: table-row !important;
-}
-
-th.hidden-xs,
-td.hidden-xs {
-  display: table-cell !important;
-}
-
-@media (max-width: 767px) {
-  .hidden-xs {
-    display: none !important;
-  }
-  tr.hidden-xs {
-    display: none !important;
-  }
-  th.hidden-xs,
-  td.hidden-xs {
-    display: none !important;
-  }
-}
-
-@media (min-width: 768px) and (max-width: 991px) {
-  .hidden-xs.hidden-sm {
-    display: none !important;
-  }
-  tr.hidden-xs.hidden-sm {
-    display: none !important;
-  }
-  th.hidden-xs.hidden-sm,
-  td.hidden-xs.hidden-sm {
-    display: none !important;
-  }
-}
-
-@media (min-width: 992px) and (max-width: 1199px) {
-  .hidden-xs.hidden-md {
-    display: none !important;
-  }
-  tr.hidden-xs.hidden-md {
-    display: none !important;
-  }
-  th.hidden-xs.hidden-md,
-  td.hidden-xs.hidden-md {
-    display: none !important;
-  }
-}
-
-@media (min-width: 1200px) {
-  .hidden-xs.hidden-lg {
-    display: none !important;
-  }
-  tr.hidden-xs.hidden-lg {
-    display: none !important;
-  }
-  th.hidden-xs.hidden-lg,
-  td.hidden-xs.hidden-lg {
-    display: none !important;
-  }
-}
-
-.hidden-sm {
-  display: block !important;
-}
-
-tr.hidden-sm {
-  display: table-row !important;
-}
-
-th.hidden-sm,
-td.hidden-sm {
-  display: table-cell !important;
-}
-
-@media (max-width: 767px) {
-  .hidden-sm.hidden-xs {
-    display: none !important;
-  }
-  tr.hidden-sm.hidden-xs {
-    display: none !important;
-  }
-  th.hidden-sm.hidden-xs,
-  td.hidden-sm.hidden-xs {
-    display: none !important;
-  }
-}
-
-@media (min-width: 768px) and (max-width: 991px) {
-  .hidden-sm {
-    display: none !important;
-  }
-  tr.hidden-sm {
-    display: none !important;
-  }
-  th.hidden-sm,
-  td.hidden-sm {
-    display: none !important;
-  }
-}
-
-@media (min-width: 992px) and (max-width: 1199px) {
-  .hidden-sm.hidden-md {
-    display: none !important;
-  }
-  tr.hidden-sm.hidden-md {
-    display: none !important;
-  }
-  th.hidden-sm.hidden-md,
-  td.hidden-sm.hidden-md {
-    display: none !important;
-  }
-}
-
-@media (min-width: 1200px) {
-  .hidden-sm.hidden-lg {
-    display: none !important;
-  }
-  tr.hidden-sm.hidden-lg {
-    display: none !important;
-  }
-  th.hidden-sm.hidden-lg,
-  td.hidden-sm.hidden-lg {
-    display: none !important;
-  }
-}
-
-.hidden-md {
-  display: block !important;
-}
-
-tr.hidden-md {
-  display: table-row !important;
-}
-
-th.hidden-md,
-td.hidden-md {
-  display: table-cell !important;
-}
-
-@media (max-width: 767px) {
-  .hidden-md.hidden-xs {
-    display: none !important;
-  }
-  tr.hidden-md.hidden-xs {
-    display: none !important;
-  }
-  th.hidden-md.hidden-xs,
-  td.hidden-md.hidden-xs {
-    display: none !important;
-  }
-}
-
-@media (min-width: 768px) and (max-width: 991px) {
-  .hidden-md.hidden-sm {
-    display: none !important;
-  }
-  tr.hidden-md.hidden-sm {
-    display: none !important;
-  }
-  th.hidden-md.hidden-sm,
-  td.hidden-md.hidden-sm {
-    display: none !important;
-  }
-}
-
-@media (min-width: 992px) and (max-width: 1199px) {
-  .hidden-md {
-    display: none !important;
-  }
-  tr.hidden-md {
-    display: none !important;
-  }
-  th.hidden-md,
-  td.hidden-md {
-    display: none !important;
-  }
-}
-
-@media (min-width: 1200px) {
-  .hidden-md.hidden-lg {
-    display: none !important;
-  }
-  tr.hidden-md.hidden-lg {
-    display: none !important;
-  }
-  th.hidden-md.hidden-lg,
-  td.hidden-md.hidden-lg {
-    display: none !important;
-  }
-}
-
-.hidden-lg {
-  display: block !important;
-}
-
-tr.hidden-lg {
-  display: table-row !important;
-}
-
-th.hidden-lg,
-td.hidden-lg {
-  display: table-cell !important;
-}
-
-@media (max-width: 767px) {
-  .hidden-lg.hidden-xs {
-    display: none !important;
-  }
-  tr.hidden-lg.hidden-xs {
-    display: none !important;
-  }
-  th.hidden-lg.hidden-xs,
-  td.hidden-lg.hidden-xs {
-    display: none !important;
-  }
-}
-
-@media (min-width: 768px) and (max-width: 991px) {
-  .hidden-lg.hidden-sm {
-    display: none !important;
-  }
-  tr.hidden-lg.hidden-sm {
-    display: none !important;
-  }
-  th.hidden-lg.hidden-sm,
-  td.hidden-lg.hidden-sm {
-    display: none !important;
-  }
-}
-
-@media (min-width: 992px) and (max-width: 1199px) {
-  .hidden-lg.hidden-md {
-    display: none !important;
-  }
-  tr.hidden-lg.hidden-md {
-    display: none !important;
-  }
-  th.hidden-lg.hidden-md,
-  td.hidden-lg.hidden-md {
-    display: none !important;
-  }
-}
-
-@media (min-width: 1200px) {
-  .hidden-lg {
-    display: none !important;
-  }
-  tr.hidden-lg {
-    display: none !important;
-  }
-  th.hidden-lg,
-  td.hidden-lg {
-    display: none !important;
-  }
-}
-
-.visible-print {
-  display: none !important;
-}
-
-tr.visible-print {
-  display: none !important;
-}
-
-th.visible-print,
-td.visible-print {
-  display: none !important;
-}
-
-@media print {
-  .visible-print {
-    display: block !important;
-  }
-  tr.visible-print {
-    display: table-row !important;
-  }
-  th.visible-print,
-  td.visible-print {
-    display: table-cell !important;
-  }
-  .hidden-print {
-    display: none !important;
-  }
-  tr.hidden-print {
-    display: none !important;
-  }
-  th.hidden-print,
-  td.hidden-print {
-    display: none !important;
-  }
-}
\ No newline at end of file
diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css
old mode 100755
new mode 100644
index a553c4f5e08a..ed3905e0e0c9
--- a/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css
+++ b/hbase-server/src/main/resources/hbase-webapps/static/css/bootstrap.min.css
@@ -1,9 +1,6 @@
 /*!
- * Bootstrap v3.0.0
- *
- * Copyright 2013 Twitter, Inc
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world by @mdo and @fat.
- *//*!
normalize.css v2.1.0 | MIT License | git.io/normalize */article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block}audio:not([controls]){display:none;height:0}[hidden]{display:none}html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:focus{outline:thin dotted}a:active,a:hover{outline:0}h1{margin:.67em 0;font-size:2em}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}dfn{font-style:italic}hr{height:0;-moz-box-sizing:content-box;box-sizing:content-box}mark{color:#000;background:#ff0}code,kbd,pre,samp{font-family:monospace,serif;font-size:1em}pre{white-space:pre-wrap}q{quotes:"\201C" "\201D" "\2018" "\2019"}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:0}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid #c0c0c0}legend{padding:0;border:0}button,input,select,textarea{margin:0;font-family:inherit;font-size:100%}button,input{line-height:normal}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}button[disabled],html input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{padding:0;box-sizing:border-box}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}textarea{overflow:auto;vertical-align:top}table{border-collapse:collapse;border-spacing:0}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:2cm .5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}*,*:before,*:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:62.5%;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.428571429;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}button,input,select[multiple],textarea{background-image:none}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}img{vertical-align:middle}.img-responsive{display:block;height:auto;max-width:100%}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;height:auto;max-width:100%;padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid 
#ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0 0 0 0);border:0}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16.099999999999998px;font-weight:200;line-height:1.4}@media(min-width:768px){.lead{font-size:21px}}small{font-size:85%}cite{font-style:normal}.text-muted{color:#999}.text-primary{color:#428bca}.text-warning{color:#c09853}.text-danger{color:#b94a48}.text-success{color:#468847}.text-info{color:#3a87ad}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-weight:500;line-height:1.1}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small{font-weight:normal;line-height:1;color:#999}h1,h2,h3{margin-top:20px;margin-bottom:10px}h4,h5,h6{margin-top:10px;margin-bottom:10px}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}h1 small,.h1 small{font-size:24px}h2 small,.h2 small{font-size:18px}h3 small,.h3 small,h4 small,.h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-bottom:20px}dt,dd{line-height:1.428571429}dt{font-weight:bold}dd{margin-left:0}@media(min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{font-size:17.5px;font-weight:300;line-height:1.25}blockquote p:last-child{margin-bottom:0}blockquote small{display:block;line-height:1.428571429;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:1.428571429}code,pre{font-family:Monaco,Menlo,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;white-space:nowrap;background-color:#f9f2f4;border-radius:4px}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.428571429;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre 
code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.container:before,.container:after{display:table;content:" "}.container:after{clear:both}.container:before,.container:after{display:table;content:" "}.container:after{clear:both}.row{margin-right:-15px;margin-left:-15px}.row:before,.row:after{display:table;content:" "}.row:after{clear:both}.row:before,.row:after{display:table;content:" "}.row:after{clear:both}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11{float:left}.col-xs-1{width:8.333333333333332%}.col-xs-2{width:16.666666666666664%}.col-xs-3{width:25%}.col-xs-4{width:33.33333333333333%}.col-xs-5{width:41.66666666666667%}.col-xs-6{width:50%}.col-xs-7{width:58.333333333333336%}.col-xs-8{width:66.66666666666666%}.col-xs-9{width:75%}.col-xs-10{width:83.33333333333334%}.col-xs-11{width:91.66666666666666%}.col-xs-12{width:100%}@media(min-width:768px){.container{max-width:750px}.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11{float:left}.col-sm-1{width:8.333333333333332%}.col-sm-2{width:16.666666666666664%}.col-sm-3{width:25%}.col-sm-4{width:33.33333333333333%}.col-sm-5{width:41.66666666666667%}.col-sm-6{width:50%}.col-sm-7{width:58.333333333333336%}.col-sm-8{width:66.66666666666666%}.col-sm-9{width:75%}.col-sm-10{width:83.33333333333334%}.col-sm-11{width:91.66666666666666%}.col-sm-12{width:100%}.col-sm-push-1{left:8.333333333333332%}.col-sm-push-2{left:16.666666666666664%}.col-sm-push-3{left:25%}.col-sm-push-4{left:33.33333333333333%}.col-sm-push-5{left:41.66666666666667%}.col-sm-push-6{left:50%}.col-sm-push-7{left:58.333333333333336%}.col-sm-push-8{left:66.66666666666666%}.col-sm-push-9{left:75%}.col-sm-push-10{left:83.33333333333334%}.col-sm-push-11{left:91.66666666666666%}.col-sm-pull-1{right:8.333333333333332%}.col-sm-pull-2{right:16.666666666666664%}.col-sm-pull-3{right:25%}.col-sm-pull-4{right:33.33333333333333%}.col-sm-pull-5{right:41.66666666666667%}.col-sm-pull-6{right:50%}.col-sm-pull-7{right:58.333333333333336%}.col-sm-pull-8{right:66.66666666666666%}.col-sm-pull-9{right:75%}.col-sm-pull-10{right:83.33333333333334%}.col-sm-pull-11{right:91.66666666666666%}.col-sm-offset-1{margin-left:8.333333333333332%}.col-sm-offset-2{margin-left:16.666666666666664%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-4{margin-left:33.33333333333333%}.col-sm-offset-5{margin-left:41.66666666666667%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-7{margin-left:58.333333333333336%}.col-sm-offset-8{margin-left:66.66666666666666%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-10{margin-left:83.33333333333334%}.col-sm-offset-11{margin-left:91.66666666666666%}}@media(min-width:992px){.container{max-width:970px}.col-md-1,.col-md-2,.col-md-3,.col-m
d-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11{float:left}.col-md-1{width:8.333333333333332%}.col-md-2{width:16.666666666666664%}.col-md-3{width:25%}.col-md-4{width:33.33333333333333%}.col-md-5{width:41.66666666666667%}.col-md-6{width:50%}.col-md-7{width:58.333333333333336%}.col-md-8{width:66.66666666666666%}.col-md-9{width:75%}.col-md-10{width:83.33333333333334%}.col-md-11{width:91.66666666666666%}.col-md-12{width:100%}.col-md-push-0{left:auto}.col-md-push-1{left:8.333333333333332%}.col-md-push-2{left:16.666666666666664%}.col-md-push-3{left:25%}.col-md-push-4{left:33.33333333333333%}.col-md-push-5{left:41.66666666666667%}.col-md-push-6{left:50%}.col-md-push-7{left:58.333333333333336%}.col-md-push-8{left:66.66666666666666%}.col-md-push-9{left:75%}.col-md-push-10{left:83.33333333333334%}.col-md-push-11{left:91.66666666666666%}.col-md-pull-0{right:auto}.col-md-pull-1{right:8.333333333333332%}.col-md-pull-2{right:16.666666666666664%}.col-md-pull-3{right:25%}.col-md-pull-4{right:33.33333333333333%}.col-md-pull-5{right:41.66666666666667%}.col-md-pull-6{right:50%}.col-md-pull-7{right:58.333333333333336%}.col-md-pull-8{right:66.66666666666666%}.col-md-pull-9{right:75%}.col-md-pull-10{right:83.33333333333334%}.col-md-pull-11{right:91.66666666666666%}.col-md-offset-0{margin-left:0}.col-md-offset-1{margin-left:8.333333333333332%}.col-md-offset-2{margin-left:16.666666666666664%}.col-md-offset-3{margin-left:25%}.col-md-offset-4{margin-left:33.33333333333333%}.col-md-offset-5{margin-left:41.66666666666667%}.col-md-offset-6{margin-left:50%}.col-md-offset-7{margin-left:58.333333333333336%}.col-md-offset-8{margin-left:66.66666666666666%}.col-md-offset-9{margin-left:75%}.col-md-offset-10{margin-left:83.33333333333334%}.col-md-offset-11{margin-left:91.66666666666666%}}@media(min-width:1200px){.container{max-width:1170px}.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11{float:left}.col-lg-1{width:8.333333333333332%}.col-lg-2{width:16.666666666666664%}.col-lg-3{width:25%}.col-lg-4{width:33.33333333333333%}.col-lg-5{width:41.66666666666667%}.col-lg-6{width:50%}.col-lg-7{width:58.333333333333336%}.col-lg-8{width:66.66666666666666%}.col-lg-9{width:75%}.col-lg-10{width:83.33333333333334%}.col-lg-11{width:91.66666666666666%}.col-lg-12{width:100%}.col-lg-push-0{left:auto}.col-lg-push-1{left:8.333333333333332%}.col-lg-push-2{left:16.666666666666664%}.col-lg-push-3{left:25%}.col-lg-push-4{left:33.33333333333333%}.col-lg-push-5{left:41.66666666666667%}.col-lg-push-6{left:50%}.col-lg-push-7{left:58.333333333333336%}.col-lg-push-8{left:66.66666666666666%}.col-lg-push-9{left:75%}.col-lg-push-10{left:83.33333333333334%}.col-lg-push-11{left:91.66666666666666%}.col-lg-pull-0{right:auto}.col-lg-pull-1{right:8.333333333333332%}.col-lg-pull-2{right:16.666666666666664%}.col-lg-pull-3{right:25%}.col-lg-pull-4{right:33.33333333333333%}.col-lg-pull-5{right:41.66666666666667%}.col-lg-pull-6{right:50%}.col-lg-pull-7{right:58.333333333333336%}.col-lg-pull-8{right:66.66666666666666%}.col-lg-pull-9{right:75%}.col-lg-pull-10{right:83.33333333333334%}.col-lg-pull-11{right:91.66666666666666%}.col-lg-offset-0{margin-left:0}.col-lg-offset-1{margin-left:8.333333333333332%}.col-lg-offset-2{margin-left:16.666666666666664%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-4{margin-left:33.33333333333333%}.col-lg-offset-5{margin-left:41.66666666666667%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-7{margin-left:58.333333333333336%}.col-lg-offset-8{margin-le
ft:66.66666666666666%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-10{margin-left:83.33333333333334%}.col-lg-offset-11{margin-left:91.66666666666666%}}table{max-width:100%;background-color:transparent}th{text-align:left}.table{width:100%;margin-bottom:20px}.table thead>tr>th,.table tbody>tr>th,.table tfoot>tr>th,.table thead>tr>td,.table tbody>tr>td,.table tfoot>tr>td{padding:8px;line-height:1.428571429;vertical-align:top;border-top:1px solid #ddd}.table thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table caption+thead tr:first-child th,.table colgroup+thead tr:first-child th,.table thead:first-child tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed thead>tr>th,.table-condensed tbody>tr>th,.table-condensed tfoot>tr>th,.table-condensed thead>tr>td,.table-condensed tbody>tr>td,.table-condensed tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-child(odd)>td,.table-striped>tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover>tbody>tr:hover>td,.table-hover>tbody>tr:hover>th{background-color:#f5f5f5}table col[class*="col-"]{display:table-column;float:none}table td[class*="col-"],table th[class*="col-"]{display:table-cell;float:none}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#dff0d8;border-color:#d6e9c6}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td{background-color:#d0e9c6;border-color:#c9e2b3}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.table>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#f2dede;border-color:#eed3d7}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td{background-color:#ebcccc;border-color:#e6c1c7}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table>tfoot>tr.warning>th{background-color:#fcf8e3;border-color:#fbeed5}.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:h
over>td{background-color:#faf2cc;border-color:#f8e5be}@media(max-width:768px){.table-responsive{width:100%;margin-bottom:15px;overflow-x:scroll;overflow-y:hidden;border:1px solid #ddd}.table-responsive>.table{margin-bottom:0;background-color:#fff}.table-responsive>.table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-responsive>.table-bordered>thead>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>thead>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;margin-bottom:5px;font-weight:bold}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type="file"]{display:block}select[multiple],select[size]{height:auto}select optgroup{font-family:inherit;font-size:inherit;font-style:inherit}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}input[type="number"]::-webkit-outer-spin-button,input[type="number"]::-webkit-inner-spin-button{height:auto}.form-control:-moz-placeholder{color:#999}.form-control::-moz-placeholder{color:#999}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.428571429;color:#555;vertical-align:middle;background-color:#fff;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6)}.form-control[disabled],.form-control[readonly],fieldset[disabled] 
.form-control{cursor:not-allowed;background-color:#eee}textarea.form-control{height:auto}.form-group{margin-bottom:15px}.radio,.checkbox{display:block;min-height:20px;padding-left:20px;margin-top:10px;margin-bottom:10px;vertical-align:middle}.radio label,.checkbox label{display:inline;margin-bottom:0;font-weight:normal;cursor:pointer}.radio input[type="radio"],.radio-inline input[type="radio"],.checkbox input[type="checkbox"],.checkbox-inline input[type="checkbox"]{float:left;margin-left:-20px}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{display:inline-block;padding-left:20px;margin-bottom:0;font-weight:normal;vertical-align:middle;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type="radio"][disabled],input[type="checkbox"][disabled],.radio[disabled],.radio-inline[disabled],.checkbox[disabled],.checkbox-inline[disabled],fieldset[disabled] input[type="radio"],fieldset[disabled] input[type="checkbox"],fieldset[disabled] .radio,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}textarea.input-sm{height:auto}.input-lg{height:45px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-lg{height:45px;line-height:45px}textarea.input-lg{height:auto}.has-warning .help-block,.has-warning .control-label{color:#c09853}.has-warning .form-control{border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-warning .form-control:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e}.has-warning .input-group-addon{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.has-error .help-block,.has-error .control-label{color:#b94a48}.has-error .form-control{border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-error .form-control:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.has-error .input-group-addon{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.has-success .help-block,.has-success .control-label{color:#468847}.has-success .form-control{border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-success .form-control:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.has-success .input-group-addon{color:#468847;background-color:#dff0d8;border-color:#468847}.form-control-static{padding-top:7px;margin-bottom:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media(min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block}.form-inline .radio,.form-inline .checkbox{display:inline-block;padding-left:0;margin-top:0;margin-bottom:0}.form-inline .radio input[type="radio"],.form-inline .checkbox input[type="checkbox"]{float:none;margin-left:0}}.form-horizontal .control-label,.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal 
.radio-inline,.form-horizontal .checkbox-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}.form-horizontal .form-group:before,.form-horizontal .form-group:after{display:table;content:" "}.form-horizontal .form-group:after{clear:both}.form-horizontal .form-group:before,.form-horizontal .form-group:after{display:table;content:" "}.form-horizontal .form-group:after{clear:both}@media(min-width:768px){.form-horizontal .control-label{text-align:right}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:normal;line-height:1.428571429;text-align:center;white-space:nowrap;vertical-align:middle;cursor:pointer;border:1px solid transparent;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.btn:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus{color:#333;text-decoration:none}.btn:active,.btn.active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{pointer-events:none;cursor:not-allowed;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default:hover,.btn-default:focus,.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{color:#333;background-color:#ebebeb;border-color:#adadad}.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#fff;border-color:#ccc}.btn-primary{color:#fff;background-color:#428bca;border-color:#357ebd}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{color:#fff;background-color:#3276b1;border-color:#285e8e}.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#428bca;border-color:#357ebd}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{color:#fff;background-color:#ed9c28;border-color:#d58512}.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] 
.btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] .btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] .btn-warning.active{background-color:#f0ad4e;border-color:#eea236}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{color:#fff;background-color:#d2322d;border-color:#ac2925}.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#d9534f;border-color:#d43f3a}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{color:#fff;background-color:#47a447;border-color:#398439}.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] .btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.disabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#5cb85c;border-color:#4cae4c}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{color:#fff;background-color:#39b3d7;border-color:#269abc}.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] .btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] .btn-info.active{background-color:#5bc0de;border-color:#46b8da}.btn-link{font-weight:normal;color:#428bca;cursor:pointer;border-radius:0}.btn-link,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:hover,.btn-link:focus,.btn-link:active{border-color:transparent}.btn-link:hover,.btn-link:focus{color:#2a6496;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,fieldset[disabled] .btn-link:hover,.btn-link[disabled]:focus,fieldset[disabled] .btn-link:focus{color:#999;text-decoration:none}.btn-lg{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-sm,.btn-xs{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-xs{padding:1px 
5px}.btn-block{display:block;width:100%;padding-right:0;padding-left:0}.btn-block+.btn-block{margin-top:5px}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;transition:height .35s ease}@font-face{font-family:'Glyphicons Halflings';src:url('../fonts/glyphicons-halflings-regular.eot');src:url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'),url('../fonts/glyphicons-halflings-regular.woff') format('woff'),url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'),url('../fonts/glyphicons-halflings-regular.svg#glyphicons-halflingsregular') format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';-webkit-font-smoothing:antialiased;font-style:normal;font-weight:normal;line-height:1}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-print:before{content:"\e045"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e0
57"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:befor
e{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-briefcase:before{content:"\1f4bc"}.glyphicon-calendar:before{content:"\1f4c5"}.glyphicon-pushpin:before{content:"\1f4cc"}.glyphicon-paperclip:before{content:"\1f4ce"}.glyphicon-camera:before{content:"\1f4f7"}.glyphicon-lock:before{content:"\1f512"}.glyphicon-bell:before{content:"\1f514"}.glyphicon-bookmark:before{content:"\1f516"}.glyphicon-fire:before{content:"\1f525"}.glyphicon-wrench:before{content:"\1f527"}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid #000;border-right:4px solid transparent;border-bottom:0 dotted;border-left:4px solid transparent;content:""}.dropdown{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;list-style:none;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,0.175);box-shadow:0 6px 12px rgba(0,0,0,0.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 
20px;clear:both;font-weight:normal;line-height:1.428571429;color:#333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{color:#fff;text-decoration:none;background-color:#428bca}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#fff;text-decoration:none;background-color:#428bca;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#999}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.428571429;color:#999}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0 dotted;border-bottom:4px solid #000;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}@media(min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}}.btn-default .caret{border-top-color:#333}.btn-primary .caret,.btn-success .caret,.btn-warning .caret,.btn-danger .caret,.btn-info .caret{border-top-color:#fff}.dropup .btn-default .caret{border-bottom-color:#333}.dropup .btn-primary .caret,.dropup .btn-success .caret,.dropup .btn-warning .caret,.dropup .btn-danger .caret,.dropup .btn-info .caret{border-bottom-color:#fff}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;float:left}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover,.btn-group>.btn:focus,.btn-group-vertical>.btn:focus,.btn-group>.btn:active,.btn-group-vertical>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn.active{z-index:2}.btn-group>.btn:focus,.btn-group-vertical>.btn:focus{outline:0}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar:before,.btn-toolbar:after{display:table;content:" "}.btn-toolbar:after{clear:both}.btn-toolbar:before,.btn-toolbar:after{display:table;content:" "}.btn-toolbar:after{clear:both}.btn-toolbar .btn-group{float:left}.btn-toolbar>.btn+.btn,.btn-toolbar>.btn-group+.btn,.btn-toolbar>.btn+.btn-group,.btn-toolbar>.btn-group+.btn-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child>.btn:last-child,.btn-group>.btn-group:first-child>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child>.btn:first-child{border-bottom-left-radius:0;border-top-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group-xs>.btn{padding:5px 10px;padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-sm>.btn{padding:5px 
10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-lg>.btn{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after{display:table;content:" "}.btn-group-vertical>.btn-group:after{clear:both}.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after{display:table;content:" "}.btn-group-vertical>.btn-group:after{clear:both}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-right-radius:0;border-bottom-left-radius:4px;border-top-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child>.btn:last-child,.btn-group-vertical>.btn-group:first-child>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child>.btn:first-child{border-top-right-radius:0;border-top-left-radius:0}.btn-group-justified{display:table;width:100%;border-collapse:separate;table-layout:fixed}.btn-group-justified .btn{display:table-cell;float:none;width:1%}[data-toggle="buttons"]>.btn>input[type="radio"],[data-toggle="buttons"]>.btn>input[type="checkbox"]{display:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group.col{float:none;padding-right:0;padding-left:0}.input-group .form-control{width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:45px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:45px;line-height:45px}textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group-addon,.input-group-btn,.input-group .form-control{display:table-cell}.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child),.input-group 
.form-control:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:normal;line-height:1;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type="radio"],.input-group-addon input[type="checkbox"]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:last-child>.btn,.input-group-btn:last-child>.dropdown-toggle,.input-group-btn:first-child>.btn:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-4px}.input-group-btn>.btn:hover,.input-group-btn>.btn:active{z-index:2}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav:before,.nav:after{display:table;content:" "}.nav:after{clear:both}.nav:before,.nav:after{display:table;content:" "}.nav:after{clear:both}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#999}.nav>li.disabled>a:hover,.nav>li.disabled>a:focus{color:#999;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:hover,.nav .open>a:focus{background-color:#eee;border-color:#428bca}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.428571429;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:hover,.nav-tabs>li.active>a:focus{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{text-align:center}@media(min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}}.nav-tabs.nav-justified>li>a{margin-right:0;border-bottom:1px solid #ddd}.nav-tabs.nav-justified>.active>a{border-bottom-color:#fff}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:5px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:hover,.nav-pills>li.active>a:focus{color:#fff;background-color:#428bca}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{text-align:center}@media(min-width:768px){.nav-justified>li{display:table-cell;width:1%}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-bottom:1px solid 
#ddd}.nav-tabs-justified>.active>a{border-bottom-color:#fff}.tabbable:before,.tabbable:after{display:table;content:" "}.tabbable:after{clear:both}.tabbable:before,.tabbable:after{display:table;content:" "}.tabbable:after{clear:both}.tab-content>.tab-pane,.pill-content>.pill-pane{display:none}.tab-content>.active,.pill-content>.active{display:block}.nav .caret{border-top-color:#428bca;border-bottom-color:#428bca}.nav a:hover .caret{border-top-color:#2a6496;border-bottom-color:#2a6496}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-right-radius:0;border-top-left-radius:0}.navbar{position:relative;z-index:1000;min-height:50px;margin-bottom:20px;border:1px solid transparent}.navbar:before,.navbar:after{display:table;content:" "}.navbar:after{clear:both}.navbar:before,.navbar:after{display:table;content:" "}.navbar:after{clear:both}@media(min-width:768px){.navbar{border-radius:4px}}.navbar-header:before,.navbar-header:after{display:table;content:" "}.navbar-header:after{clear:both}.navbar-header:before,.navbar-header:after{display:table;content:" "}.navbar-header:after{clear:both}@media(min-width:768px){.navbar-header{float:left}}.navbar-collapse{max-height:340px;padding-right:15px;padding-left:15px;overflow-x:visible;border-top:1px solid transparent;box-shadow:inset 0 1px 0 rgba(255,255,255,0.1);-webkit-overflow-scrolling:touch}.navbar-collapse:before,.navbar-collapse:after{display:table;content:" "}.navbar-collapse:after{clear:both}.navbar-collapse:before,.navbar-collapse:after{display:table;content:" "}.navbar-collapse:after{clear:both}.navbar-collapse.in{overflow-y:auto}@media(min-width:768px){.navbar-collapse{width:auto;border-top:0;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-collapse .navbar-nav.navbar-left:first-child{margin-left:-15px}.navbar-collapse .navbar-nav.navbar-right:last-child{margin-right:-15px}.navbar-collapse .navbar-text:last-child{margin-right:0}}.container>.navbar-header,.container>.navbar-collapse{margin-right:-15px;margin-left:-15px}@media(min-width:768px){.container>.navbar-header,.container>.navbar-collapse{margin-right:0;margin-left:0}}.navbar-static-top{border-width:0 0 1px}@media(min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;border-width:0 0 1px}@media(min-width:768px){.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}}.navbar-fixed-top{top:0;z-index:1030}.navbar-fixed-bottom{bottom:0;margin-bottom:0}.navbar-brand{float:left;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}@media(min-width:768px){.navbar>.container .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;border:1px solid transparent;border-radius:4px}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media(min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media(max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;box-shadow:none}.navbar-nav .open .dropdown-menu>li>a,.navbar-nav .open .dropdown-menu .dropdown-header{padding:5px 15px 5px 
25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:hover,.navbar-nav .open .dropdown-menu>li>a:focus{background-image:none}}@media(min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}@media(min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1)}@media(min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block}.navbar-form .radio,.navbar-form .checkbox{display:inline-block;padding-left:0;margin-top:0;margin-bottom:0}.navbar-form .radio input[type="radio"],.navbar-form .checkbox input[type="checkbox"]{float:none;margin-left:0}}@media(max-width:767px){.navbar-form .form-group{margin-bottom:5px}}@media(min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-right-radius:0;border-top-left-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-nav.pull-right>li>.dropdown-menu,.navbar-nav>li>.dropdown-menu.pull-right{right:0;left:auto}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-text{float:left;margin-top:15px;margin-bottom:15px}@media(min-width:768px){.navbar-text{margin-right:15px;margin-left:15px}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:hover,.navbar-default .navbar-brand:focus{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:hover,.navbar-default .navbar-nav>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:hover,.navbar-default .navbar-nav>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:hover,.navbar-default .navbar-nav>.disabled>a:focus{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:hover,.navbar-default .navbar-toggle:focus{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#ccc}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e6e6e6}.navbar-default .navbar-nav>.dropdown>a:hover .caret,.navbar-default .navbar-nav>.dropdown>a:focus .caret{border-top-color:#333;border-bottom-color:#333}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:hover,.navbar-default .navbar-nav>.open>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.open>a .caret,.navbar-default .navbar-nav>.open>a:hover .caret,.navbar-default .navbar-nav>.open>a:focus .caret{border-top-color:#555;border-bottom-color:#555}.navbar-default .navbar-nav>.dropdown>a .caret{border-top-color:#777;border-bottom-color:#777}@media(max-width:767px){.navbar-default .navbar-nav .open 
.dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#999}.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-brand:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#999}.navbar-inverse .navbar-nav>li>a{color:#999}.navbar-inverse .navbar-nav>li>a:hover,.navbar-inverse .navbar-nav>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:hover,.navbar-inverse .navbar-nav>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:hover,.navbar-inverse .navbar-nav>.disabled>a:focus{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:hover,.navbar-inverse .navbar-toggle:focus{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:hover,.navbar-inverse .navbar-nav>.open>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.dropdown>a:hover .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .navbar-nav>.dropdown>a .caret{border-top-color:#999;border-bottom-color:#999}.navbar-inverse .navbar-nav>.open>a .caret,.navbar-inverse .navbar-nav>.open>a:hover .caret,.navbar-inverse .navbar-nav>.open>a:focus .caret{border-top-color:#fff;border-bottom-color:#fff}@media(max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#999}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover{color:#fff}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#999}.pagination{display:inline-block;padding-left:0;margin:20px 
0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.428571429;text-decoration:none;background-color:#fff;border:1px solid #ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-bottom-left-radius:4px;border-top-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:hover,.pagination>li>span:hover,.pagination>li>a:focus,.pagination>li>span:focus{background-color:#eee}.pagination>.active>a,.pagination>.active>span,.pagination>.active>a:hover,.pagination>.active>span:hover,.pagination>.active>a:focus,.pagination>.active>span:focus{z-index:2;color:#fff;cursor:default;background-color:#428bca;border-color:#428bca}.pagination>.disabled>span,.pagination>.disabled>a,.pagination>.disabled>a:hover,.pagination>.disabled>a:focus{color:#999;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-bottom-left-radius:6px;border-top-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-bottom-left-radius:3px;border-top-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager:before,.pager:after{display:table;content:" "}.pager:after{clear:both}.pager:before,.pager:after{display:table;content:" "}.pager:after{clear:both}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:hover,.pager li>a:focus{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#999;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:bold;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}.label[href]:hover,.label[href]:focus{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.label-default{background-color:#999}.label-default[href]:hover,.label-default[href]:focus{background-color:#808080}.label-primary{background-color:#428bca}.label-primary[href]:hover,.label-primary[href]:focus{background-color:#3071a9}.label-success{background-color:#5cb85c}.label-success[href]:hover,.label-success[href]:focus{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:hover,.label-info[href]:focus{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:hover,.label-warning[href]:focus{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:hover,.label-danger[href]:focus{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 
7px;font-size:12px;font-weight:bold;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;background-color:#999;border-radius:10px}.badge:empty{display:none}a.badge:hover,a.badge:focus{color:#fff;text-decoration:none;cursor:pointer}.btn .badge{position:relative;top:-1px}a.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#428bca;background-color:#fff}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px;margin-bottom:30px;font-size:21px;font-weight:200;line-height:2.1428571435;color:inherit;background-color:#eee}.jumbotron h1{line-height:1;color:inherit}.jumbotron p{line-height:1.4}.container .jumbotron{border-radius:6px}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron{padding-right:60px;padding-left:60px}.jumbotron h1{font-size:63px}}.thumbnail{display:inline-block;display:block;height:auto;max-width:100%;padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.thumbnail>img{display:block;height:auto;max-width:100%}a.thumbnail:hover,a.thumbnail:focus{border-color:#428bca}.thumbnail>img{margin-right:auto;margin-left:auto}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:bold}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable{padding-right:35px}.alert-dismissable .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#356635}.alert-info{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#2d6987}.alert-warning{color:#c09853;background-color:#fcf8e3;border-color:#fbeed5}.alert-warning hr{border-top-color:#f8e5be}.alert-warning .alert-link{color:#a47e3c}.alert-danger{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.alert-danger hr{border-top-color:#e6c1c7}.alert-danger .alert-link{color:#953b39}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-moz-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:0 0}to{background-position:40px 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;color:#fff;text-align:center;background-color:#428bca;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-transition:width .6s ease;transition:width .6s ease}.progress-striped .progress-bar{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 
50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-size:40px 40px}.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-moz-animation:progress-bar-stripes 2s linear infinite;-ms-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 
75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.media,.media-body{overflow:hidden;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media>.pull-left{margin-right:10px}.media>.pull-right{margin-left:10px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-right-radius:4px;border-top-left-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:hover,a.list-group-item:focus{text-decoration:none;background-color:#f5f5f5}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca}.list-group-item.active .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:hover .list-group-item-text,.list-group-item.active:focus .list-group-item-text{color:#e1edf7}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,0.05);box-shadow:0 1px 1px rgba(0,0,0,0.05)}.panel-body{padding:15px}.panel-body:before,.panel-body:after{display:table;content:" "}.panel-body:after{clear:both}.panel-body:before,.panel-body:after{display:table;content:" "}.panel-body:after{clear:both}.panel>.list-group{margin-bottom:0}.panel>.list-group .list-group-item{border-width:1px 0}.panel>.list-group .list-group-item:first-child{border-top-right-radius:0;border-top-left-radius:0}.panel>.list-group .list-group-item:last-child{border-bottom:0}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.panel>.table{margin-bottom:0}.panel>.panel-body+.table{border-top:1px solid #ddd}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-right-radius:3px;border-top-left-radius:3px}.panel-title{margin-top:0;margin-bottom:0;font-size:16px}.panel-title>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid 
#ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-group .panel{margin-bottom:0;overflow:hidden;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse .panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse .panel-body{border-top-color:#ddd}.panel-default>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse .panel-body{border-top-color:#428bca}.panel-primary>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse .panel-body{border-top-color:#d6e9c6}.panel-success>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#d6e9c6}.panel-warning{border-color:#fbeed5}.panel-warning>.panel-heading{color:#c09853;background-color:#fcf8e3;border-color:#fbeed5}.panel-warning>.panel-heading+.panel-collapse .panel-body{border-top-color:#fbeed5}.panel-warning>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#fbeed5}.panel-danger{border-color:#eed3d7}.panel-danger>.panel-heading{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.panel-danger>.panel-heading+.panel-collapse .panel-body{border-top-color:#eed3d7}.panel-danger>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#eed3d7}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse .panel-body{border-top-color:#bce8f1}.panel-info>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#bce8f1}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 1px rgba(0,0,0,0.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:bold;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;opacity:.5;filter:alpha(opacity=50)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}body.modal-open,.modal-open .navbar-fixed-top,.modal-open .navbar-fixed-bottom{margin-right:15px}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;display:none;overflow:auto;overflow-y:scroll}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-moz-transition:-moz-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out}.modal.in 
.modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.modal-dialog{z-index:1050;width:auto;padding:10px;margin-right:auto;margin-left:auto}.modal-content{position:relative;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,0.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,0.5);box-shadow:0 3px 9px rgba(0,0,0,0.5);background-clip:padding-box}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1030;background-color:#000}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{min-height:16.428571429px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.428571429}.modal-body{position:relative;padding:20px}.modal-footer{padding:19px 20px 20px;margin-top:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer:before,.modal-footer:after{display:table;content:" "}.modal-footer:after{clear:both}.modal-footer:before,.modal-footer:after{display:table;content:" "}.modal-footer:after{clear:both}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}@media screen and (min-width:768px){.modal-dialog{right:auto;left:50%;width:600px;padding-top:30px;padding-bottom:30px}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,0.5);box-shadow:0 5px 15px rgba(0,0,0,0.5)}}.tooltip{position:absolute;z-index:1030;display:block;font-size:12px;line-height:1.4;opacity:0;filter:alpha(opacity=0);visibility:visible}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.top-right .tooltip-arrow{right:5px;bottom:0;border-top-color:#000;border-width:5px 5px 0}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-right-color:#000;border-width:5px 5px 5px 0}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-left-color:#000;border-width:5px 0 5px 5px}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-bottom-color:#000;border-width:0 5px 5px}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-bottom-color:#000;border-width:0 5px 5px}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-bottom-color:#000;border-width:0 5px 5px}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;max-width:276px;padding:1px;text-align:left;white-space:normal;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);background-clip:padding-box}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:normal;line-height:18px;background-color:#f7f7f7;border-bottom:1px 
solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover .arrow,.popover .arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover .arrow{border-width:11px}.popover .arrow:after{border-width:10px;content:""}.popover.top .arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,0.25);border-bottom-width:0}.popover.top .arrow:after{bottom:1px;margin-left:-10px;border-top-color:#fff;border-bottom-width:0;content:" "}.popover.right .arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,0.25);border-left-width:0}.popover.right .arrow:after{bottom:-10px;left:1px;border-right-color:#fff;border-left-width:0;content:" "}.popover.bottom .arrow{top:-11px;left:50%;margin-left:-11px;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,0.25);border-top-width:0}.popover.bottom .arrow:after{top:1px;margin-left:-10px;border-bottom-color:#fff;border-top-width:0;content:" "}.popover.left .arrow{top:50%;right:-11px;margin-top:-11px;border-left-color:#999;border-left-color:rgba(0,0,0,0.25);border-right-width:0}.popover.left .arrow:after{right:1px;bottom:-10px;border-left-color:#fff;border-right-width:0;content:" "}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;height:auto;max-width:100%;line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6);opacity:.5;filter:alpha(opacity=50)}.carousel-control.left{background-image:-webkit-gradient(linear,0 top,100% top,from(rgba(0,0,0,0.5)),to(rgba(0,0,0,0.0001)));background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,0.5) 0),color-stop(rgba(0,0,0,0.0001) 100%));background-image:-moz-linear-gradient(left,rgba(0,0,0,0.5) 0,rgba(0,0,0,0.0001) 100%);background-image:linear-gradient(to right,rgba(0,0,0,0.5) 0,rgba(0,0,0,0.0001) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000',endColorstr='#00000000',GradientType=1)}.carousel-control.right{right:0;left:auto;background-image:-webkit-gradient(linear,0 top,100% top,from(rgba(0,0,0,0.0001)),to(rgba(0,0,0,0.5)));background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,0.0001) 0),color-stop(rgba(0,0,0,0.5) 100%));background-image:-moz-linear-gradient(left,rgba(0,0,0,0.0001) 0,rgba(0,0,0,0.5) 100%);background-image:linear-gradient(to right,rgba(0,0,0,0.0001) 0,rgba(0,0,0,0.5) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000',endColorstr='#80000000',GradientType=1)}.carousel-control:hover,.carousel-control:focus{color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control 
.glyphicon-chevron-right{position:absolute;top:50%;left:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;margin-top:-10px;margin-left:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;margin-left:-15px;font-size:30px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after{display:table;content:" "}.clearfix:after{clear:both}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.affix{position:fixed}@-ms-viewport{width:device-width}@media screen and (max-width:400px){@-ms-viewport{width:320px}}.hidden{display:none!important;visibility:hidden!important}.visible-xs{display:none!important}tr.visible-xs{display:none!important}th.visible-xs,td.visible-xs{display:none!important}@media(max-width:767px){.visible-xs{display:block!important}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-xs.visible-sm{display:block!important}tr.visible-xs.visible-sm{display:table-row!important}th.visible-xs.visible-sm,td.visible-xs.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-xs.visible-md{display:block!important}tr.visible-xs.visible-md{display:table-row!important}th.visible-xs.visible-md,td.visible-xs.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-xs.visible-lg{display:block!important}tr.visible-xs.visible-lg{display:table-row!important}th.visible-xs.visible-lg,td.visible-xs.visible-lg{display:table-cell!important}}.visible-sm{display:none!important}tr.visible-sm{display:none!important}th.visible-sm,td.visible-sm{display:none!important}@media(max-width:767px){.visible-sm.visible-xs{display:block!important}tr.visible-sm.visible-xs{display:table-row!important}th.visible-sm.visible-xs,td.visible-sm.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-sm{display:block!important}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}@media(min-width:992px) and 
(max-width:1199px){.visible-sm.visible-md{display:block!important}tr.visible-sm.visible-md{display:table-row!important}th.visible-sm.visible-md,td.visible-sm.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-sm.visible-lg{display:block!important}tr.visible-sm.visible-lg{display:table-row!important}th.visible-sm.visible-lg,td.visible-sm.visible-lg{display:table-cell!important}}.visible-md{display:none!important}tr.visible-md{display:none!important}th.visible-md,td.visible-md{display:none!important}@media(max-width:767px){.visible-md.visible-xs{display:block!important}tr.visible-md.visible-xs{display:table-row!important}th.visible-md.visible-xs,td.visible-md.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-md.visible-sm{display:block!important}tr.visible-md.visible-sm{display:table-row!important}th.visible-md.visible-sm,td.visible-md.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-md{display:block!important}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-md.visible-lg{display:block!important}tr.visible-md.visible-lg{display:table-row!important}th.visible-md.visible-lg,td.visible-md.visible-lg{display:table-cell!important}}.visible-lg{display:none!important}tr.visible-lg{display:none!important}th.visible-lg,td.visible-lg{display:none!important}@media(max-width:767px){.visible-lg.visible-xs{display:block!important}tr.visible-lg.visible-xs{display:table-row!important}th.visible-lg.visible-xs,td.visible-lg.visible-xs{display:table-cell!important}}@media(min-width:768px) and (max-width:991px){.visible-lg.visible-sm{display:block!important}tr.visible-lg.visible-sm{display:table-row!important}th.visible-lg.visible-sm,td.visible-lg.visible-sm{display:table-cell!important}}@media(min-width:992px) and (max-width:1199px){.visible-lg.visible-md{display:block!important}tr.visible-lg.visible-md{display:table-row!important}th.visible-lg.visible-md,td.visible-lg.visible-md{display:table-cell!important}}@media(min-width:1200px){.visible-lg{display:block!important}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}.hidden-xs{display:block!important}tr.hidden-xs{display:table-row!important}th.hidden-xs,td.hidden-xs{display:table-cell!important}@media(max-width:767px){.hidden-xs{display:none!important}tr.hidden-xs{display:none!important}th.hidden-xs,td.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-xs.hidden-sm{display:none!important}tr.hidden-xs.hidden-sm{display:none!important}th.hidden-xs.hidden-sm,td.hidden-xs.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-xs.hidden-md{display:none!important}tr.hidden-xs.hidden-md{display:none!important}th.hidden-xs.hidden-md,td.hidden-xs.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-xs.hidden-lg{display:none!important}tr.hidden-xs.hidden-lg{display:none!important}th.hidden-xs.hidden-lg,td.hidden-xs.hidden-lg{display:none!important}}.hidden-sm{display:block!important}tr.hidden-sm{display:table-row!important}th.hidden-sm,td.hidden-sm{display:table-cell!important}@media(max-width:767px){.hidden-sm.hidden-xs{display:none!important}tr.hidden-sm.hidden-xs{display:none!important}th.hidden-sm.hidden-xs,td.hidden-sm.hidden-xs{display:none!important}}@media(min-width:768px) and 
(max-width:991px){.hidden-sm{display:none!important}tr.hidden-sm{display:none!important}th.hidden-sm,td.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-sm.hidden-md{display:none!important}tr.hidden-sm.hidden-md{display:none!important}th.hidden-sm.hidden-md,td.hidden-sm.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-sm.hidden-lg{display:none!important}tr.hidden-sm.hidden-lg{display:none!important}th.hidden-sm.hidden-lg,td.hidden-sm.hidden-lg{display:none!important}}.hidden-md{display:block!important}tr.hidden-md{display:table-row!important}th.hidden-md,td.hidden-md{display:table-cell!important}@media(max-width:767px){.hidden-md.hidden-xs{display:none!important}tr.hidden-md.hidden-xs{display:none!important}th.hidden-md.hidden-xs,td.hidden-md.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-md.hidden-sm{display:none!important}tr.hidden-md.hidden-sm{display:none!important}th.hidden-md.hidden-sm,td.hidden-md.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}tr.hidden-md{display:none!important}th.hidden-md,td.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-md.hidden-lg{display:none!important}tr.hidden-md.hidden-lg{display:none!important}th.hidden-md.hidden-lg,td.hidden-md.hidden-lg{display:none!important}}.hidden-lg{display:block!important}tr.hidden-lg{display:table-row!important}th.hidden-lg,td.hidden-lg{display:table-cell!important}@media(max-width:767px){.hidden-lg.hidden-xs{display:none!important}tr.hidden-lg.hidden-xs{display:none!important}th.hidden-lg.hidden-xs,td.hidden-lg.hidden-xs{display:none!important}}@media(min-width:768px) and (max-width:991px){.hidden-lg.hidden-sm{display:none!important}tr.hidden-lg.hidden-sm{display:none!important}th.hidden-lg.hidden-sm,td.hidden-lg.hidden-sm{display:none!important}}@media(min-width:992px) and (max-width:1199px){.hidden-lg.hidden-md{display:none!important}tr.hidden-lg.hidden-md{display:none!important}th.hidden-lg.hidden-md,td.hidden-lg.hidden-md{display:none!important}}@media(min-width:1200px){.hidden-lg{display:none!important}tr.hidden-lg{display:none!important}th.hidden-lg,td.hidden-lg{display:none!important}}.visible-print{display:none!important}tr.visible-print{display:none!important}th.visible-print,td.visible-print{display:none!important}@media print{.visible-print{display:block!important}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}.hidden-print{display:none!important}tr.hidden-print{display:none!important}th.hidden-print,td.hidden-print{display:none!important}}
\ No newline at end of file
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*!
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon
-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:be
fore{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211
"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px 
solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote 
.small,blockquote footer,blockquote small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:''}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:'\00A0 \2014'}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfoot>tr>td,.table>tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid #ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table 
th[class*=col-]{position:static;display:table-cell;float:none}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>thead>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px\9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=checkbox]:focus,input[type=radio]:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control::-ms-expand{background-color:transparent;border:0}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#eee;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date].form-control,input[type=time].form-control,input[type=datetime-local].form-control,input[type=month].form-control{line-height:34px}.input-group-sm input[type=date],.input-group-sm 
input[type=time],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=time],.input-group-lg input[type=datetime-local],.input-group-lg input[type=month],input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px\9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.form-control-static{min-height:34px;padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.form-group-sm select.form-control{height:30px;line-height:30px}.form-group-sm select[multiple].form-control,.form-group-sm textarea.form-control{height:auto}.form-group-sm .form-control-static{height:30px;min-height:32px;padding:6px 10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.form-group-lg select.form-control{height:46px;line-height:46px}.form-group-lg select[multiple].form-control,.form-group-lg textarea.form-control{height:auto}.form-group-lg .form-control-static{height:46px;min-height:38px;padding:11px 16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.form-group-lg .form-control+.form-control-feedback,.input-group-lg+.form-control-feedback,.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.form-group-sm 
.form-control+.form-control-feedback,.input-group-sm+.form-control-feedback,.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline .control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal 
.checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:11px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled.focus,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled].focus,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default 
.badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggle.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled.focus,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled].focus,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled.focus,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled].focus,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled.focus,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled].focus,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info 
.badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled.focus,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled].focus,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled.focus,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled].focus,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s 
linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bottom:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar 
.input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group 
.form-control:focus{z-index:3}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 
15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media 
(min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group 
.input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default 
.btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:2;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:3;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 
5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{padding-right:15px;padding-left:15px;border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}a.list-group-item,button.list-group-item{color:#555}a.list-group-item .list-group-item-heading,button.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover,button.list-group-item:focus,button.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success,button.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading,button.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover,button.list-group-item-success:focus,button.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover,button.list-group-item-success.active,button.list-group-item-success.active:focus,button.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info,button.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading,button.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover,button.list-group-item-info:focus,button.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover,button.list-group-item-info.active,button.list-group-item-info.active:focus,button.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning,button.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover,button.list-group-item-warning:focus,button.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover,button.list-group-item-warning.active,button.list-group-item-warning.active:focus,button.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger,button.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover,button.list-group-item-danger:focus,button.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover,button.list-group-item-danger.active,button.list-group-item-danger.active:focus,button.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.panel-heading+.panel-collapse>.list-group .list-group-item:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel-heading+.list-group 
.list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child 
th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group 
.panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 
#fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;filter:alpha(opacity=0);opacity:0;line-break:auto}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 
5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);line-break:auto}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} +/*# sourceMappingURL=bootstrap.min.css.map */ \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.eot old mode 100755 new mode 100644 index 87eaa434234e2a984c261e0450a2f4ad837aa7b4..b93a4953fff68df523aa7656497ee339d6026d64 GIT binary patch delta 19939 zcmV(tKR!hM z403H!GIMLm%TP)pH7k~9Z0T1jY1LPjFGcQ=oYMs~BdpY;Ri=KjW)O*y8&yQy#JTr~ ztFhopw=v-_A^!d^p1R6&ssc9|l9OwSm6QhtD1W+$p=cjY_~|%rknLn7d2x=pF|*>< zITnLJ5iN;=NJo+Z^cb~MBKSsX^aR+THbXO%q$Zw+!5;XkS2Z!g(;M8^LZ7c;%j!{K zQifG8^|pH|zkNbr!w)lh>qBE@2oPJglx5w$(y_=9rUFJI+SjGL61Ukl(R(u9q5 zddcb1nASCtGQ^?M;{{OKDW25+5uO{y!$yNQQ;Pv{ghs%7ZsBPZVSxg-xIjb^&~u18 zniwB_IUC}vr9Z{Gkp>y3!V3&id!|%t?K|GJ$DG$yTuz=^_?uBJyI$0DkbgJwW`7GX z8JNn(Q!_yf+znrDkbz)V95+yZ5^$Ki^xz0-tL?KP@MqXB;9qdN!O{d;fGs`4*QI_} zS|cyvG$2bx0AREryNm&P-vZJw2itrJM1lqs(X{~V0D>lD!=y=xw2u74kHI^)P3)9= zC$?{GhLmlh3rPzR1|fGGX*qmII)A6zr@*`C_saghdlmbB_PPCW1iHAXPx$5Z?x~;; z`kO46Ac9O?CWJ~b@s-&*F9YolIVQ|m_zdWiuzNfA`X|6MU>uTV0DnMs6i5$*c?d*W zB)R~EGl9zkh(vfYI1B^@gI9ncLwGfK7zlRm1a8 z4GTm{3M43Sod&%J?omOMLEMQ!mOv-RBn@Z-a2(1o&Q=mIAYh%kbH1u&(j>_=BqDS& zJ!Z6+G=xEfdsJ(d2QYkc7)Ye{5X8^|qwioFOeV3kbJS?2T>+G*K|G@xp^LI$QnV+d z0+k_yi-9ptrU5&n5?_jRGk@D7pdjM%>-rej>Q>_g>I*7%Wzj}pMSmbqB#wJ2*;hSq zIWlBzw7T9aS?qHb+iW_*!^^zywG7;=bv3D`-hb}Ubij3$UO2wY zHt6#o2grm}ZF%rf$t4#4b$;~nnk9ZQa?+S#{deD*UXb};sam6rTAg8{gR(Fo+2{q0 zp||=@_F$zhjtzBjz_I7*Swf{;nH#9{g@nox;i!Pr73zf(TL)$Ei|+3JIzXW6N+(4Z z+kq^vGanswVroE*aDS-5#^e|96SQWi$=?<8f9|!nWmh>U3G*Ae;W04hLbX-GTD2 zpbLXMrso0yp2tGfnJ5HSQbBN`X8B&Q`mZZzIiPXV-43~pihoVUbQ~C61RcBBs)2Cp zMU9nm2S8niKpyiCsaB1jYgVNS1$7hZ8suc@UeOU)AdMB}mIjwnKtDwt)%IC3^r)B| 
z$?shw;K#h0Gy}j&-sMa7!3X5lC&R(MWU}H=Lws%HZRKv{Bff$V2P7*rICUoxg^=fE zCi}mDTW(uS5r5&x9r==;u_b##Lqyt4IsslGpo@ajV#b*|CdvN~i0*=CGm|Yd(D-$< z=zp!p=FsrDgJKP7Oj_yMf>(k@Q_18>WX%Hcp*Aj&Yc){rgam9io;UnedXiK+PW?`y zk(Vz0B7myp{3(gW8JFymTPNRqRU!{T9D@XavzLNNm4AWTcByyJ+t_gHneDA~xnygC zh@Lsv2%C!KtElE>~I!zvb z#WyG5g?~&e=@&_8m&Qg=3A@UEY5=_kij3{V46G!=p~8e9#50*_K@;8N0D+P%Q;c5w z+J=yY2GHa!C2?iX6zuDMz{XK=tEU#MY;GcHzMcmrkdX)G(NSeTMr;mC@2DdBL30$q zDBUw1Svw-3OrF}*Wq@?$94Ew5dOS^6A*|n1ihnaCas^r$7CMNMVx=t(UL41SFM}je z{kL%s^-2UnCs$M$&vUm<)F-P4k)g~ z?IQ`1qsek{a+F%@`Myz5TkrKWr@e@zk%1@~a$TB>| z-$2w@7MXRC0D?9fv{J8;zuVpUkql`t7Fc(bVLAeK|$YxhA45FLVr9K{b0uftsG!h zi2V5J&a0HE)|eT>D*9sH&vlCwP^~l*c0iXamC6Jz2#=EPQRUR7BvyhFp&6C!KtnJJ z3++;((;NickWgEZ9T9puSf)rABW2{l3j*?UpguqP1x3hLPd-2nN!TQObF7|e8sZ8? z0uocDvMDe@j6W8mIe(R5Vl1|eKuenO%&BiC?gak)4v4H#v?Z(z&z?+mE1L?!E09Qh z%*K#Ev=&ZF%o{X}#ut*)LvB!X=(7zcPgL@p(k^N>;KZ~z$lP|ech;Pcs}f125+;=^ z-(Ui~ig_4QoseT;PZ3)qp=YqC2vMh;fVkRZsux@COT9_wpMRHN^asR@Oi=yP7`O=| zu)&lk?sq^;>a*=>O;G5@(|ya>4A$h&9#n@c=jss z%g|DkdeshD;)G&{!1~)%TU`1bYgGc7cI-&Xh}?0K2yo!0gh|5fNf_(JmnjGfi(*0z z2@qJ6P`XaEg@2lK4|?qhG7U-QJrX^nAf_K191gRH{iwq`eTHI+oZxZ((`sqnhNoe& zBO(c`7oR>c0lb2curcI8o1<_s*6rO;f~QXs8pi|^9q3P&b$Itt^!c#4&I(4)vMBm! z-Gr%6Crw0n%1rG`5hTd9oM`4I4f%_v&;$T6jPJxWn}3^AX`tNWa!5)Rl2EX807Om8 zJ916qf-98vmkKJD;GgZ-mo;@4G9(A8W)L%yOCaAo`B1V<=W@|wi%BVgnAYVMUh5xZ zVLl2a#o*DiVhbpr`j$WlERiy1EPf)UH``J>u0_!ujytTalt@_RDEhe)pBEf7iZWUQ zALQK#Cw~A`s@uQzi-Cj$50t$!N-O_fAtP@aj)a?(S$;IwE!TtcM^_&z@FCyBL6sUX zCOCj*5V*XQ_P<^l4B&lga}lxy$9bG{e$G&_YsK zuEt(8W%B}^{eq`XIJl2iEL%d0h84u9?nEPjSbv82U9v10*68A^OhR)`Lp~P`E28ah z&FWrwsO+Z-p!W`ykosyv>@GA9z*!MtiC=qV0j5DRJ#vol*ed{u1+BE!?xzHyIZ#i# zK_MXv3fFzKS;|lv0!n3s6_@8nZkn+rc>Ikq78I;6eNtxlCS4(>gF=1?ANx&E}(+io#QR%+xW?y-E1w$EH6e+|^Ggz@* z3s*KkL`|6$RQNnDgsXMyN6t%(>^Rv(bM)HKu>)7;k#dFIT;DfZT-D3}@>bI#Nf@T0 zjP+BYr>RZsCckg|JFox}n1b88@CI=cBP;*nmsx>!cv5*9dG@12zjL(;rkFj*Z{W)c~*%)Av^uo3pm(RntO(~(p%nPkp1NImL8 zgdWn0$q=j$eViuUgX+R8VNk=Tb}KqE7Uq8P!74Dku%d`$rpc4S_11%A2@Jx5_~FYT zB0(#OasZG>ZVaKv;B!Ib!N}T|ReueD{Q?B|q+*~*liN-<{v*xpv$`%|{1Th6b>yxUVfJ(F2HW5rqYkF%rA3;&C`ROGcK~!=wr#GJmo1eNu83 zdh?DfYF?$lam!o;h254Ta2bQUEA?Pz9c-Gj6CAyd;yP;!v~-p=MT8npVM)UeN*1h8 z4M1>ntwot?G2}EmHWK#qFJVZ|F1*jvuv7q8q;Qi=i?|5HOgN;yM(2dwnrWy34s7i) zmJ+gA*orvG4JC~hAuqxCdw;E{4${T(py`~8+2tL^Se$^m2V#hoy@dc3PRYUqcyPPA zb=|T=)_}5IT&ZMD*n2^?wy0VoFzKe9lK1+2HtgwfrihuSy?$n<`D#ZS?E$p|)=~z0M1Une zuBDJbl!cTwf~BM}f}CK+8X?XgU?9aEhPHQE^QV7_)!>0^F9>0sFhLTA0;|HgpVWc0 z56W3+vagr5@`z;Y=6`sak!w0;nuGuErXP|#)!5lrOf-RD9XLr@O+vv_Y-8ls-^k#Z zExSe1f+x<}jKw-E9sfkEu#o4`+4#n(I2bh3DSr3=1TqA-YJi`X$Dj~twzmWiLMTk* zwFU@$3nF2unx?NbEC!}xIH#1Q)RPx!AUQOq<D-q%#)(mRZYoavr@-J0J8q7i}HmULp71cDQe`%v>=hKF^x8GV#Dj(#*Rq$r>aJH z@`&-d0l$~8tHh&^B{bslQQ5>7Lj;M+h^G$42^=Ij(mKUWKJrD@-C+X(+{OZLtAWg) zUcwdFJOUX>cYody09j{ok2iB$h9B^f@Chw>&@tG#HHI1FVZ`H221}aPkmgcC$60`e z^G0!fx#H)s^f>PUz_KdHPITpvCoWlH6oizUq|NYJ70n*|cEw8yD9T+0Cdr%=9Euqz z4xk;?nWg<6GOs!?)eV5;Z=}wT(S;mPOKCz#G*$4qSbr=Nj-riQ7l`mA3<@x6o$bg;u!jFEF-^c}YwEkaO_ z$Paq-0Zz} zK_+p96Y5jgNm9ru$#VsB3OOox(%&#bYAJ07?BPhKLB95AdC-q&@=a}|CN#y0g*1Q* zfQQ`LXSknM<|cGm^V%M!0?$uFQ!ukxvh;#v2Y)wBc8v#4?`kP`>-Ry)D6Hade+?fZ z&7#R$rq&8YV4IZwit9IJdbq@ohMl|KEw@p?U8XkMD-%LvVa-5ZNy0j^(z~?Y{=OYO zg94YDt%!3|pXASO9b*U)$NAFR@+vEIL8#k(>{?4o;&WG<`5R z0eZ)ALy#Bjp~zOQDXhisSr2|g*x$*C1!qsb^$MoY=7cY z0?_aPMhw-^Z||1rNjR&a zd;YjOSybrrVkip~h_F!WYW@T~dSMTHxP=~fSfy`al`u3Qo->2Y1#HUrN65AoPLkj- zK8`}>I4EWb`gKQd$C}6vXL<3(*MD@QU&DERz|EA73d1Gj2ZgZv9c_>fhD-|R;2}vQ z%+t(;_W^9Tw=_D$u~bj?IUkmT900DZkRY23H97s|Pp;LLEih^xON!~!M{YDO0g6&>_iwSOlDl&+F8 z?;@Hp_w&*4Lv+45oSfSIJx9lmDSYM!14lw0(qbRYY 
zlP%N|?ttR3lWGA-aMbtHmUf&7?=P3m^ysOz6%(K=Ij(m+VE9Fohzji<0_;Yx7hz{X z(Akrq(1~=Zv?*+yWeS`4%zr!S47TR7wYn~9BP(ERX_27`9zu!^@#0N2n;+633%b7$#gRwSaEzWe zt6>U;){-BOC0Sb-RYy7InexWdKwJf4MCu&SSsaCudx@QUr0@TK`d^$VK%km?ZmHs3_3 zZ`nj=z(NM^-)I*dToo?{7&WD-HAtU3Z6p*G8(rde67yuJ&L& zlgah6GccX|AAc8U_l(6eUFuREW`70xm%lg^H3SwpbG%?ng$5oFO@O301M$lWSi4nm zh&+jyrCgYk##&F{b93mwO_H}J18N*5&ke-Z1_Jj9E~h1*4xrLd%0f(jUgX~a|M^FV z%ZQOPaN$IUtd%a0qSeAb3fzB`mP#8yDIFRpxysE5&jEGWEO->b50H$$f-B$yCA>A;PQYF=qoiOlt zc1K2eFGlz+o6mRpb%HPS?jI_D0qX}VlEw*K^};wnh&H0D!v3kWi@ReywBQaJ7EA(z zJ7bMontvYq_lgJG*$2Y93vO}=pHUT4CvT%sbr|wgu6qb^(vXl}oXx+FgM(89WU9MC z1S-R@I&JbeS#^3c|4>4!4eUs0U>Iz)Ym#v26aP$1XbNLRVm4*lj{wq(AU`35nGimF z3o1QUjRE|vyG!msp6ioxc#$yEM_?rga(QxFHh=1hSu>0`jKvFwW)~`#Di|AXmEa{g znc8G#!{{;dG2WuufnF!f{auNgmJL$P-{^6W+|(}E>h}OFoc5K8(_STnuWTB^+7aRn zYQ6?(NsP^A)^@*SVR5FONbo3`$I&lU#Ox~Sw#8Qv_MOn=k9;2R&Mpp@19CqDM_2^4tI>iFVX3_yRzC0 zvVRzSphv~J+1Odv!SMG)NVS-~Dw`5h{i==b=%nV78X9mmt}xSMk4iG*K*d+IC4bST zWAD6Q8eY>R(dQ!wDmq7}uI3mvj-l^vKFvfs`uPH-kbb0KIqPUxLLl)SRgSAl143WW zvWS@Z%ZTnJs{N7yw09RLxg@jbc6=jLJP$SpIOnQMO}5XGE!j0yE55d&C+!A83d_W* zPKA;9j$%se!%uc6$n1QJgexjdjDNgqC=X(w1Sa`mQYC$~ z3dLRjt!swyY7aDV7(5kV-b)e>st6D$4Lv2@tU$5gm^nF#tE(MtrzJ zQY6V0w|OiX4g|)rF2BIlCOvg=bKw~Cp-)7`@0Kxq*d^e6mNLm5@Do`v5Cw2Uod>Zvbiw&a_~S7#6izQMD_pyiaHVS0#P9TEfRq@{ z;TQl{%54?to5M*x1mC?9x6=q&1Cv^K9-#r$P0)!@{W!9d0WmKNN`FVI^~7FP2^1_) z$Ye23t`zeIUaWcFaB-9qY`=jHqcUjS(IohPYIgDXi~gzv9RaP(^`1$Z_9cgksHfrR z)LmBQ2F?@K3N8RATPs%*RX7{Lty_-eh5 z;f4e7vVDl5dk+};m>qCB-!gRU&4xToq(cHhV6wc^%9zID9)H7!>yw?QgexZriI<5U zg1M@16H+il9zQ|*Lu^U@0_O;?S~0A)n?^rs;it6u_2eFVUI3gIMYUu{Nu0G*uo1eU zCLd%We)~ zbZA)s2!vo+M}HfE6v*Rc2x)?~sPQO;VY(x5bG%oO^0C5cEiv-P(PEjaEXs~l>0@oG zY0XS(I0BWmc?8oHBkJ8A-rf(wv!}K z=?YC2lhK-BlC%G+2f#!0LtZ5^*?`RnSa4Bcx%8}|B)TVFiEx$DXS;l_U+<+zt$kM>Z_G%g98)wBe88EmMsEQQDBve zi9NO&J)BD$QXbUk)hsS)7!`A&XGtb%W{E8mNzEw#s(uWzjo#@zL&7Qfr=>^i2EaX{+=SSR;b-|!XD1Q%rTfreRJ<84f3*z{=lcZ=K9oglG zCP|s3>U%hTG@OhI3-D?V5GX2vfdOGbg;eeZQvN?;+AxGkle0_fzEJ-PxU8OlO$-MV zi~C+xk|Y6x)Wou_j33<{sVFae|4VfM|DBLR53qMf{EZbM2c{E zf`54yhAST$kpoh;U?j#-^~fQHvQMpo zAmKEtio1M`tjBR_`HDMp2ogWVzRj-H`Rjz*z8yh1T{UL+Hn(6zvu3( zA-rkp9_RUOewBy$_r>?r-1modAB6+mqJQe=)7%Mi0TC!PIC^pzj9EM5Dlx-(7*1=} zBsm0X*7O$;%4q=wa2zX$ab2M1uxIFLnP!yH?)lOWre=lIrPscosHE4skv?I!%h9KB zHqp=R)cd!#_~}vBpjzS*@BdfC!%}0CZGYhtN+VQT%XRc>CuO%t|9Dku4xFJ z;2_V$C@yH4C(8>$u?tkKvY&ho9-eyy21$fAV@w?Kp%5;)o*ttS>$#0-Nxtf_2Y9$auAb+`Uoii#nF!#4bj0c$5jsc!sbmWOH*DWP54Ot$E zOUGYsW^`R3w-+i_MnfOQbV`!2#8lK0D;A_&S6Yxb;qt`d*AQnLM$iPZ-~!oR3f`V( z6rHvpHJTDhJ+{~RBR2LfK=m?u(!)DfnqdDzFT|Nq1qJwu8fWdW{1s@58GmYdN}bag zuz*goL0F_Ss@(-F^`x5s3rplS=#uL;8;{zu*iA65R6wAPg-?5-qnI>n6KWr$d?Ku7 zbkU!b+k{p8f*3pq7A^c3-eHCmuGJalHDZtYDeYcTv9LIqgwDa9I)VCKti{d|858dHrBAUwkDIa+i}YPAqx%bCK=)OEF{w@Xz9j+{Fh4OWr3;oM zN7GuP(pBe^WLTt~=xS;He zr^g4$tebC9-7qGL0+j^&{_!k^Mn9qb#^i)vopUUQW0RS|Xn)I<|0Kgj6Z~UrFy#}v zcVPYEkGi?Vh^as?fN}?37HpU}V^p^@nVsBgK%3n9O2XZgv~^I>7zHNEs!qxCWcf;J z6jYE)xT(=TEnsuF$ z?<2!qKp$-NLw{N%X;9{2z2ms4bTBk|GBErNa}n6qs^Tm`b;co42h!L=6u9Eih_kykEmZGvvA`GPRhW+p;B^?2jDG8|F-XEn zs3<(nv5tcBy^zx#4zprX)Rwq$X$#Nk=Asa+NL|68ENgKLfm=vvT)*b?mN z&1o#~#eZU%G0caHqRru8y0Iwqs%?)*;ek%@?EGbkOQf(6-_$P(Ti~=P_4XrC^p{5l zWXg3K&Qz(re_r@)v!cHx`G$4i`|=o4rT_#e-e;cLl|RV$3b#x7gZ zWd*N`&(H(BvqQm;o{#_KB1c63&ZwiCJSsfa&wu{uXbY<;7^GkzL6}txKu=w06#Z<) zW-^q8AxNI*&qgBvQ9{D$J$1;wIcBtI2Y~ZCB2t8<+5nOPOWZRpf+z?_2`L4<8Bnp| z_@Sttrm_%JT`V`jWnm3P3G-yZ+({r;FH#6aupP&+556$Z!~1>)NV(c0XC0tLw~h|zS{&}cL%~Sm;+wvF*ot>2=Mk1>~P+@hblfqlsMy`5t#h2 zUnwM2#k%v#6;iYzY8p>Q^wvImh|nLBk$(%jbS$qe9ED;~wJdF5L?SPrdhkKw4b7AS z*rn>gJp&)0Hyy;pya{h+{Lq&CatlpG 
zRzQQ|4@mU`rs(qfFX;VY2t=jXrt3(RgRZV@OEoFNj9Lj#MYNolAA|)n5W%KN0Y|AP z(6bLa?nEcHdTc0>p!c0cpU=@Goqvih*_`k#R}2SICtP`M4lJ+a*%e z6e<5M@&|s36a_g2J6-e?F2b5;D=F9mCCHujYt_!c(uuREUeX`-PJjkiw?1MqdO96* zhNy2u3QLXlvbw@DcUycuIpVH%S4~FZuXWyFxg6^g&0%O2a6v))9Xwx5uQc6DG;#4E||S|IcgIn zLgOi*B$tfoLCZp-kLK(NgMZr^p8<3rVlHCR0znW(sGeGlQeyTBQzAG6RAd7 zNclk6O3*V6GiiR2bXm}%*=TD8NQY3*@z)O*_x&+9)8OC)rG{DDM1Pke+;Eq~raLFI zF#gk? z&AQVF2`Jt%M~hvM1g?W{YBE=~}YER(}m?OqHj18CH`ibI#~ZGAuEdFmqe!Dy zWt)R}5;tmWq3;N=0B_wBMQC4cGa#)Ai)wH`sx;_uGFQMw1sx}ARA1u_CZn(+$d+nW zY?w&x)4N}5$(rHF;`Yk3`43@P`HY~U8z6JeK;&D61Vj)<8mh4URGO!?I5cRA88)S3fcmpjcC^t5gsbK{JSew0VJ=N~hiofGgh?hJh0;;M2 zfc*1O1s=kNn1s4qkOGqm)C{U|Y3WGmn1`&3iD?EWX{3}Sk0PUcQz}-EcSehoFR*kx z?8?F6QGZ~{l3EtHnjS0gaB?Y|D7h{ZYPIROt4aK812y#tJOt#l63T|L%dW}REFfze zPmzb5L{x~%N+wS9@AoAb+Mnu;guV;rn)`$+sv3M8RAu)NJMH z6#En~Dy3GX--obPZPg!&r{-wJkLK7fQ(%0vUR*panS`msHtV=DsZKMUf+7bER1$BW z^9BTrY;xqqWuSr*A@u~sNHPnH6>x(=Al93xpcsb&-owuVdz;9-{x72*@}JQF=bUw9 zRev%nGV%~2W2?*}aD^m$sepNbEDPzSi}n!_941`gZN4VDU>wkVcp#>5g@{9{(sv`m z$@^E3IYg~V2Wj!qmP3e-5t{-_bMKbthn*c$wg;2rGw%#6whoaMmi|qfk~fda6uN#`dor6usf0fZiufY)iZjGQ2jOZIEPucd zOMVZ=1^{9sgfIfE$&yT%0c&H;C^jIt3<3(VVf+L|8aK|nfiKmLuO$GjApEfdoDa9} zAmGWwp;GH!N)-DW>%|fTL(h*|D>pG%+Zq$J&{alH1;#a zg7EEh!vr{|nOX!l2!_sS;H=6#I)7==KSp$mMr)RTLkw3ZnCby$Fac=-8$9AzWNMfB z5R8;C+Y(nJX%mYMi<*{Y2t&^bdPz5k&JIGb19KR+te7rKgAO1PASwmxjons}u$`+C zWdDbsM1gE-_CV~b?9O59Jd+X6}?xmP7Io&mLD*T66T%zy6#!SzTYbSmBr z!DA|7&&JxK0r#(Qu&68rwVCtd6C(jrn4cq}KH`9EqQ!iumXc1Y|%a zTd)}^IKvpuJ3O3w^MA@XJQ0@2D^^Ebut=D$rX6051g~M2i(!yu-7Pef`g7V!)IkXw zT;L3lXiZy!TwoON5|g|mzi>n~S~)joJF@peSK@8X?%&`~(vC0Wf^arMH;9SR)Ufe7W zDKXw`?hOOlSfh7P z5n4e+e|(}-4R8Hskc#zGq=4N>+UqH}&g9eQ$3Ch}F@K<94L2SG;SR8c$%a#lbs>Vq`XNmrA3?rk#-~o8%RW4- zI>)2d(xRYX0^GD{H4Oqakc_6u!A4LuT za$h&`h|;hOR@6wtApo<*N#}BmMNH?8Hh&qOAx^sh683Q-Cz2dwppyY%{&7uqy2THR zK_7x8?e)64z6tSmv+WhVjtg!)MbNQ{JAuP2+IOzsGH_2N9;|Ybhgg#aNcLcubx9+c zFPcla2E`N$@;iw&9~d@=-%Ly{oJ~m^IMfINaUUANq%>8LZsc;wp&24`Ii@K4+p?Hi2j{T#GRm@voPS|BL?jpg14mbOg(J+5`wcm+YsJMU1_Atu%Wv49 zoNs_Y;{D7CC|zf3?JIpR0|_W@aTbTV>`N$SqRrL|-Nv8LxSntw{P@f=)E~$*S+L{X z>30CKa76xON3?M=B+SWukxLWPuR*OM_C+R6f^=sYCr$@t{d3jx8X?fG@P9OM?3@>G z6n*T!ouyN?1-0uQEN|6naxZfXoKz0}1WiB-$%qvYX)_($p!$SIAVZe`OiYP1ic`=!o@7GuYJ7=^a9Pv#Q~pGdP#r$D zaMN}G-UPBWOH5_xdw+B=uJVz2$3;#=B;YEYJ9-?uHJXY}k^6jO z*S6r%ZqNmdlv2oHQ>0qg!b)REvJfC`&Cyu+2HeWu2W5~FYdc>IHpnb_9s$b2Vw8JWYOhni>!-Q6&agyD1TYs&1b=^gjQB_Qc0q^io zLY-_{Q)rfwF5&4hQ7~2P*|9iMga&Di`fxj@@q6?1NF$a<15Zu@AwTP*1?Y4>sZj|C zd~nKZq>$V5sBt0ZXH+cq)oRDkG6ciO7IVjqH0_9rbQ_dv)G>q`l9#@Pc+(`OWp^ufnN#q~3JwfnZ3SIrB{&SS$AHib5a5K$@oRawbBAb9|Lintu(jr5DyeDw9X3TjaQ3_84 zh1(1=#|B!vzMpZz;JpGPWFnhDVo|#WEUuHn3wJ5*P|aK&Vqkb5ir)ZF{wdy~3}n0$ zO+$brTYpGzq!W-BSKg-4c40JoWCi0855Dm^PCo z!BQx|f(4Az#?gg%If^(8z%p{^=J5T!ubh>YgG$q=OOM%O%sWDgbxD9AxhBSwKq zx1O7N9Bmy%1pbM;Mm?#qg~pn#<&{%{fMyCb%6~YgJSVh-?P`$fj`S_6nA#@@$Cy0Q zRATWm-FARQz;w93aiP*PaZf~KU+vI)U@x2xXlUw!_RL_yk~v@eF=#JqK>kF&i zw^iy0EzOm>(8NLB50uq!DDlhcB9kO?*oIWh*HBVB6{iI*xGi@+CQM5BX%HGiUq`WqnFwHy0(>+>|8_I-AqRzd!(&uT72zEAhJ*d6aW;jsU8m>rBkj{rJD(3(68% zsu>zE4-_!Nl=nycM4<^5KE$c&L01bAh<~>5GL1>wI>gyV37*IZ0>=ZIGvl8!JZrsu zTN-=kyegR@L5+Gah0Z!a-Vwl}F{nX06P>m2=ZftY+@xsM8(72cqeJ`glar%4=!iJT z%M|Ff7Su(A`SKXa#Z}hWWpycAKdEhWEj7z>lzvJZlHID)Rv0pDX(=1w)t)@K(too* z@+d@xe;20iJ1LT#Zy+Ejt)!UBSQ{KkzeyyfmXh%#swu!HX(+&|Dw3vOwHlvA2}`u9 zD!nvAi)-Nh8qW{+cWX~c@R&wsQ6xCdvz2JFtNC6>bt7nUNa+rTELZ#r+=6rGhvd^R5D}WWXoH4b8M8dn#A>1b?;Q9^z~n zptnTEQK!|+jxH)%MehF%nge5l-2>?XH?_Qc0yKaNDj(u!yXG7QH- z5X{MmW{Dk;3#dF$4P5>fhDVHH)BU%HcKQM0xwA2CNTd%JHMFF5_SllG`whSJH 
z)9O?H5bcuKz7%Ethz`9UPTTAxkn)!1@RN)^pB)w#IMApfUI~X**?(h3MAYwin8Bz0 z(Nb6yvqSY*li{AuE;>dCgTHYshJ@xZDPo~Qx-kI_@~5&Zp;4YG@?XYJc}1g*vk3|r z^q>5RzPhExp#IBZN=!;o^jvbQJH%IyKOXpD#RO zFt8_Woo{hTqu&m}LVw~Hj7C?9{4SAPYZfjRtu@@lp z{F^N_tvY1gmvxkeTNep9I#3+yxQMEwszOwl`?Xcf%jXovQShKFMlv)If^5J=Pxll* zszm;4qnvvZ4dt;hxN9~kQsm5-M@h-cHm6x|URA*mtr8(Y;(tKtdw73mkP#zZFA0Lz zMTt~;u*bxb%DX@UI!2BsT#r#U6@zpv=G1UhA!+prP9dfpWJemoLI>f*UJCOhKzVMR z5QtwH9vyX@Rr~?E6Aweu;O$GOUl#^THZqv(LVN+x4KMNv9A1ZkqJmViZF0RAj-gCM zXUf0JW}7J?EgbIp`jCIqZ;GUlV`#g5$?ZUI;9C$Fa^OW>Adu@XY1&sxIKfXCXJYEp zfkxJ4f+#PP!bQ%Y2LXDhVNo!QviDAs(Vpfh3%+(6d&I$=Kyt$K^)`2C> zVg$R2!fN`e)Mti_CxXE7hcbl%EjG3P8<<4=1ns+`@=6z4WhZ|Lw{JpeG9CQ;8!KIS zJ5#<8P|q0%gAS#8qaJ%ob);>w41{K0ObSyKFvA&*o9|>-W$PQ@BNA z^eTXMYybdoPi^yo6hIRI_I3#^4j)tF|3o`HRU8Y*L$47MS=Znd8NTlO)^0&5q;Pmo z{XnH@GvX_8NeyT~H4pB}IU-C@@Lkd)GiA;)MDP@0r$dHR6WSZpj+VboK+>u-C zbNi7*lw4K^ZxxM#24_Yc`jvb9NPVi75 zL+MlM^U~`;a7`4H0L|TYK>%hfEfXLsu1JGMbh|8{wuc7ucV+`Ys1kqxsj`dajwyM; z^X^`)#<+id0?1CqD|0v&1X>n?Bx;9FMzitP;%Ot=Y{=FK7UF1umbCv+AvtGd9SmIp z2JmUi2dMUF^6%$>0rnbZlXey-1@wb#7vbgXJKZre}WT=ZFem=k*WX7C_z=2sttV#0`4CwOD`S`a50Qso`m=C0iRcixiZA;Bc%s zQ6OWB$mL|)@LbVMZ?WV8J8DqcFN&@BZvAJ0qVrqj(s4r4QBXVfm@F7plV#cG&@+aKIkPnHAL}3F1HA)hU~+f!vO$1#Ew?1}`G9l)%h^mai+5Kwy1+I$Zaauh0oN zm3mQUQ=`8aEAo=0zrm72grj|c8&W!-^+^6zMgm-+SpJe{_P`h~;t1=21VLIQ5n~@Q z5Y=~VMN|LFPH3f|g$TrXW->&^Ab`WT7>Oo!u1u40?jAJ8H4 zWG$)VMcP|!W}{rARc*m|5wga2AycsQRoW*V50efA#4bf zAb!S~>^e$^$)NFU{8zF5fH}*!%vyg~LKHpg$ZRQKYMvdcWmZbF;8lRL0)|H+u5;Mu zG1NcCyfcKT!-_7m9(`gPNnzRQHsAxwl?E0K9-MSP=)i#9QwMljN+-i`3Tf*srV=iQ zkMXP$kl0cO2LUovU^M`kEm8_Xcyi`fNCWl^N>H$6BSK<{e3P$~EwTQPp^$&Sd?qzDf1|kCfiLw6u{Z%aC!X^5CzF6qofFJgklJV3oc|Qc2XdFl+y5M9*P8}A>Kh{WRgRwMSZ(?Jw;m%0etU5 zBsWT-Dj-5F;Q$OQJrQd+lv`i6>MhVo^p*^w6{~=fhe|bN*37oV0knS;dkx6#C<(NN zI}nWp6jx1mr(mnF0)WeYaNI$luzD2^AUKBt?q=o_DJkNN;=(B2{6Gn*2*j!f*@cDK z13cmGh2$<_B~Bpn>~0k4Kw^5W!x@Lj(Hg+Dzdjloz>w=01N0?@hkylC2P!{oXJWr( z_7KQCg2c7s`oMQNPGNtn#S^?ZJ3L-0C7qjj(mHezM5WS|T09&;8wLD{Fsi7lxb`M-)wpIt32BTaYZmnc{z~1!8HWhRbyO4IB?bk1Uq5yyVDo@t!3K2I|7Cxgm z`xkP8K|?&c6Kd&-MdG3@Y7ZRe*yoA2sQh?y?kRzd%tWG_mNC4;j4#j1ag%T=D$KYT zz$60Mk&P293l`hq4TG~n8uiG$aK!o!YY7`o7ie&ZOX@cl#uZiQq{ChC_~8R+CK!Y* z855o)1BmI4yMceNl#^NNkQ<{r;Bm|8Hg}bJ-S^g4`|itx)~!LNXtL}?f1Hs6UQ+f0 z-X6&TBCW=A4>bU0{rv8C4T!(wD-h>VCK4YJk`6C9$by!fxOYw-V#n+0{E(0ttqk&p= z>WuW{NvC^s?BqeaT8TrMPYwrC!y9#Lr3`8Ml=LZud30-JoldwW7Mpgb6+y9?hzw8y zd(GDF^vh5un)8xA0?6v%cmm+YhF~t-1mqO>g2M;*F z?syEvGv`mSj2)`}fW#2o+y@H&GarCRF^luD*n~zdpn0@rdbI3U21!jD3-Q?*gBTIU zYFmL9E_dc72!v@r0n&z562j=IW_*1>D+F6Om1{Rcc%+z9kcI604fO7Culg*?LmbEF0fATG8S@)oJ>NT3pYAXa*vX!eUTDFiBrp(QyDqr z0ZMTr?4uG_Nqs6f%S0g?h`1vO5fo=5S&x4{6F3-Pl?vNGu;- z@#DlNgOBe&xc>u?4qmv3<^!j0gJ|PV#*(cE8oy~g(HW}d-B}M|GRZ3*nf%N)VX#KA z;$b|@A%xp7tcfzcU4dgd?`6D8q^ulRm{^!wRcGl>60da^HEGm&P-9lvRJxuuA5wn- zN&25bHu6u@xM>Zf{X+zfT!--!+TGM9s~fq4R-+V z_?|UkBlrhkkgGie761`vff9fb(1CvwumM^988SW{hp|mZuPsNdYmdXIs@pMCzG9~Q zg-jR4!%Vx_vpN>z%Isudm3DmI{0 zOF*c4aZtqKw#mh4lZwY@6^6_z-;`CSD5~~RRWyR9IR#9T3aw)mO~xvaj8uQS7^zYr zQY=EF$c06)iin^U556ixd{lb)^lM(FfF==3z`^eW)=5a9RqvF-)2?S-(GhS;p( zu~_qBum*q}On@$#08}ynd0+spzyVco0%G6;<-i5&016cV5UKzhQ~)fX03|>L8ej+H zzzgVr6_5ZUpa4HW0Ca!=r1*asM@)$8#hw6Dz$x|tp2NSOdXxWj2hXxbc@@`=03aFw zF*E>7XaJ1R0O5cEg8%_$0DqYPu*d~vz$h*ODR2l&fIwUU;@}LI09d#JWk3ZL00>kM zVxV@F18A5UM8L%+1^F;7Nr7TS0*Md_WI!Jg09r%=F%Sc!Kn#%p7({>I1bM)?^MKK( z07FiE4LRI2=GfDkBTi!rY{C`=z_2?7fw(LUfnZ~c0|ZzW;=r>O1wfD}gn=?712B*b zB0wz(0GK2J=#U0NKo*GrJR|_nkO9HK_znj+a5qDNrW_1_;9>;==qMI}K&1)=7*GX4 zKof-kMhpPBFaiL;^#*?cbcy@{oZbW-i{wWZt?ATHQkRM8#IoW^?;cd~c~gzeO;U(Kap)rmC3$ delta 13843 
zcmV+uHtfltodN%C2LCny0HHOJ1~vqVm>2?)Kx!8;06PFs08;=U05AYB05JeA05AYB z05OrlpMQ6Z05x*}HF*Fn#uClxip|my)t@MI+lov3!4O2^!Y5D*?oI9rhX6vuY96-q z{9Pebpfjegt4i6n&H1w^Y9{nAw}FmCNB+`hU84VvjX--*ag5#?iJEE zX6>fZuKj%WC-6P~|Bjn#ywq@^|3$h%UqXyP-2N)J9Y21I>gx^T7CFKu1%yfHgxPlJ z@PDg77iImIa*pBwl0HpuJcdkI>5t(A%q7;>{9!)Jk|2=+rfP5}z)TSg34kHdJB0SG3=F0Qs0l<1B)wsC^IRvmkg(-{-F+roG)6c}nwt@JH(SV1M<2(Vv49y-D>CP zK96*anHJph>9BR1TQ~gQK);)PoOA)~vs~x770K~^#mpRb zTzqC34}11`hCe+Y=`kW^nEv^+4|s>P&q#Q9{%Q0NIE!nUP+7n@oc0HP!Yc?KD>g+&I?$wB~wD48;{;Pgd50eE&bq-$w= zP$JSvk4KLKbY zrX8krSTTr5qav!Lmyou$nt!C^rU;-_IW#4L(O|QujJhfBr+JVc1~Gyv-PzUFFBp*9 zIzC)y$3Ue#NRhNi+_qP!)_#4dj>PkyI^GrIXbWZ=tD^R7TGup@S+B4Q;m(%DIq#bZ*F_DeT2jIjeKQhKPnF}) zl{Ue^LzXEnxl)A0`XvZYc@Dhup}%qp$Rib>ic!6cRzGZ@%SYQu@bXs52~;kcQqq6$ z=yVMn5r*E2kkRkNZGYXb_HAO$b^!p1c63hRz zRC=_K1N9OX8t&_38i#@mLIhWwV+*N=qlS#L3S&lgMOxEO9uYVPDI+&Z49w;Y0{`cRKuN}+nhO?~V2x83C`F*lmKH+jt04{n0wg>Y_W*Nk zv7xqm(T%zw!S3eQ`ypU>>zbk>T46#nl>Lsp1bDQxSDeXP1lNO@viNEc1_H)|Xz(R) zbOcKQz+1CqwSSYb2ty=Q6hlB zIjq{Bf=F|Ue3VefW5v;ITlX^nci;b2U>qDt0g!97O2X;`6Rm z_#Iho2jlOaNxgvYbK>LWI*QOlbhyM=a3IG$?V$ZjfQPCsr?o90GOxTk{ZRu6kTg7b3d(~yX#}*?AIMfSI^FX;7B@cJ zj|8O`H31C5G8`KJSRO;LHxOV=Et-yM$O-^HJATki{lYbzZj?w9t(r(iH1+SL7rhCD^w1prXKV;y@Ea^<1QK zd{f90BH0?cMIsobtIRNI%yI%x|3qk_ZGU7VBs4`E`K3Tgsiq>)_sHf{7r`GW^pi}P zIoY(*PQXKWO}v7n;Fk@$eMGn|qe4mHgmQSS90<)RTqyz9XH}I-g=zbmjKy`~PtO%G zR0lq|eCCZlN{7`%=$2x0pe@rt7PJsWpFrLdU6eTR<+;Sx!r(%mkMOj-g^Svj{eQ>{ zu2_X%GZ5ib3J8G=_hdwb$AObyu^^bN1(1>13c1F>K{t!P)` zBCV28&Oprap`sZHC`u^&UY|3xuzyeMdB03?y~F5{T@olnl%!I=av5WL%^B{38EudGnZyi9wCfE$pD~cq7$*HkQ@5-py!OskxkprF)$@ zLFppgHED0Ga%# zRs)ESvgpxAdk&}R_>|llumpmicnr`UUCy;AKo|jSq77^jrmc`>f}`B-tWd2wh)DRg z^Z;|LENK*!`=#xZQ9d%iB4k?)D9I4RAo2VRFN$^2kGx`2vP==nvw!_A8;K&7I<6Q& zlvO~VaHZSUy;1@EMU|Aqd%+)!8$uiCkVvALipHh#Aq7dbkJVwQ3oUmdz$tenpyKS| zoSB)gs-D#wa(Ao0&9l4?a=z79~L3D8x_69%hYUD`@dl8H7Bu-bCEUvz$N&E7sDo+DnAZL;d5 zzU&n5LR<~U^?zxJCRNToik(p%m@&kkEI*)hCUPu5Z0R*Y+lK=vX%#;ci855 zH%XZjTwrd=b4zf_vZLm4 zL;yWY5~^RUz~qMGVB|!?BlBDKxk}qnf^4HC2?m(cdei`OFTMi6s~;ZPagxAFx`gDN zNh=V!SnTuE<%}&j37+U>?(*h z+;3hq?SC{`f{A4<(yNP?dIev)e7=yCe(r8fH|q|*+KjwZXP*rUr(o{Ri)70V2Ww(~ za^u*L$oKZc7_1MuW|R|l9K`r9Bz+|*%q_&1yp2+x3J2gO4M=<@f>@Fy4haoAV~i_v zEsQ~{(1<=hJ72p$EvGSPM=B7=<`zKfpjC3nvwz^riZ?UkHfSlkrSth1>6ai+wu{>U zF3C+Y&^~GaYCS^2ru8whP3WLlLSRhd(Xb0Xcm?U~tPNzDwC-XSP7(>XSj!Phvl5GL zEEC|pt5I-qP&P8)amK%kKa6e}TvEjC9d()Wxe@%dN z_{sc|>YJ9CmZS`=WAF7iJ0v`;Tr*M1+Q1%K1;`C-WC3=mjb_}2B98%vyJMaKtHr*X zxd{HZTq0W97==_RRq&Nsfo$~mRTe=fXn+4jqb$^c&n`hd&Bz6G`niG-M%hGCY~OSA z3x+AknfYKZB0I?pF5J^HoaP%HU1FCIP@!W&;fiu^?J8TT?Fj&a$wGr5OMGRxfPXYF zAE4|_3ga?DiWHA#G5oIRF$8(SfBx6c>AmHtQamECMyg4h0w;u4sJ!*f&Jz$pCzq5>R1*tG)<+nEp)@rW4slVWj9!WJaA zAREiYs397e8R2IIuUG{*<%JFLcHjlY1!d2yy)z!vlx=N=$`txAF)JSL!%W2r$xa%yO)G|uGmV@ z;WG~?j7UE_aWej|BM0B zF5}bzxDjrqV)pFjMpy~#CW#xJbhR8IGVonCNVi8w4?GgRfdCW)9%w^@yh}7r1g*s) z8H>_4-U;7)GWmX)lDG8iCx2+Ul@ql=B;E%LR#bL~pYXIg(cJqbf8cxf^4LB5jaGx zF%un|{`Q<_8ulQ83|HQd(^)YVu(oVs;|ce{AId19R6;!H~SA z*1G@@Etj&r4TVXPmWZO-Hj@b&5J5Q}c%Sl?KRahg$*jD4DFzK7vqy$0u+|`}>P~Q! 
z$bhp7PXZY^!j)5szl9|{szoUnxq&=S<4oB?On4m2LBXJ*Ffjss!mMZjEj`$7oe*Uf zLkO88Fk^NhzjK2ekvv&>Q~q#m_0WgIEPKfv=`bFWC{^96m(hu zn}svgl;AUJ#F4KZ6cp73VQwn7WBSih03$#oh}9I* zXg5xJaB)E4o|ce1VPoe4;YeHP6n6_~&2}PVk^|q=_r5c89T&4J8P<{r%U~cJ=r&Wa zc9<9qAp~9_MvHm04j`he zB|Xa_2!;D%5YgysIHZPz0^LyNc^y|18gg=ROL@z?MO>S_*sHdq)wu4CR2{oQqUdPY z0SQBQN%kQCd_-zDT-MlW4zX3l;=}+S(2Sp+G?oegx)|MRtNzX8^ zD(}lhQh}ftRQvk_P`>31Zge;S&@9l-tPa7dWzj*G#4=(~41}kXrLe+~Et0|P4OK8O z1>|Nl6MqCxoEJ_w3CJ)3CqwHalE4@99|k8P2Um7G-x}r0AnZ&Ez+FQySsLllKDJDG zjeK_(@OUodoYNk-Gb02dR___G6Z^`wOG(CBAfsXG)f66Rv#ncD{m6nnp3Im*Vc`1J zC7@dn87PcUu_sa!5wJwkI0R$g4INNTZYyeIrGF5q%$uz=v zAEe*Bg+fOI+w)noTx?p1`1iU%gfJT+9rn|eAw%nfL`G@pU??IK*u6E~9O3?%$vIj_ zM}IV&Kaqe}9^-+6loL&cr6*~~KRLbn;A|?5*ImPj0Zq=HcS*NRF_ug;fSeAr@S zO&?4sOb*nQXDNyaWg5=&WZ=g5wN7JYG~K+@qx~0cLuapwRZOh z7?)O25`fW@y{v`i`6Qql34fTd zh?O+x&kuK22{NfD<28c>Tb0EOj0eWYQoZf+*KVxX= zR;f!T!ox!wizNKcr&G{D?tlMarEbmt4@!5PT9~OPsLm57dUQ_#8mr|<3d8`T%RGcG z0|4LPfPmwR`g~hZ4iX7kpZaa(8lzYbB`r3kbn4D$ngBpR8~fvWnkKiHm2FIekO9jx zjl8)$Q9#YNkZ!a{0e`K@j=piPy0pVIgxKcnQ3%C5COU_b3b8%vVpSSoguSq6}>j<$=qv|A+(0%v|4q1{D$AXktBbL4S%ab#sU`uKDm!5y)wP zJL*ua5&La9O4yY!vhC_g3(bAF0Cg$HgpY~{;S-#4R60WVA(#m>fzv1osE;Zfg>3m% z9WOWvJ&8UVCmD2I^%nf&*In3Ys1DP0YEAK0%V@RkJwczOM zg_%)qr=O3itI#b+4TcxWFX8K^i%4q__GJO-xnjssK+nP`24)z>1{PBdb8 znj<8k34GMek+ET_ovcviY|}{s4F0-0S74ur2u$TwgbYnfp$EQ=y^*o#VpMY|QZ zx4?+n2!Ho*_g14dt_wXNdln8iKPyiAS07J#7S^)u)e zW@))AnDnPj=N}Mo!#0)0lQ!eX?*4&$OopV3_kX{Vjzy9iJ1~(HzY2PX7oq5*lUQL+;Tpgzm1II)bxRRv&3S`#2x5)!g zWx^OEHlWc#STxtnhMHTBz&o@>YneCWY%K!E*8FI=gDssA$ z(|-={I)+DgHkehI9e+taYTg&{sD&eOl9p2B^hkT@RqC^jiXlQ$BCn(Spfh+PGm4QJ z>fV=aFg`tpCeOm>mk&Zfm`b$13Zju~v!~3ebaOCpehF0FVV-EYLXF?x;31WBTL%p> zdCs~Z3D#ChGZrBodW46x2~uf9<#F;Ef^#p^L%pU?%KR?SNMP!9;qM`x|5w zyw8u*hMBVl%5bt0*v2lpyG3gn522#Uq)8;Hgqh}ipk;>>^urv4To>T0RH>Dm8hc*JB^E}M0yM6v-nkai&C9%QgUF? 
z^?Pi*z81N2KFhKsK%b8(k%cY;cz-R=bd1gD9_wKu;7;HSvtF{`ekMTSr>F0OJg7^T z1Iz1_!rIL&>hF_M@XQbpnC4Iv+Lob~60Yg5jMS`a4GO}<$T|4qP##k>q|LJo8e*hw zzzFIbT5+qGWDv$99WAk(kV6Te9`ty68@pFFJ{{fxFSh0z{w>H+^XQ#Ujeqp<(QD9i zi2rwNUW?Muj_|L^MpgsqislBRS%R63{jaZ+1&#A3Lq7xgpoe%ukZ;_H@KFj)&Ofhy z|NB)GYRO@zWpE2b2Y#T$$MG3G6L3NM%;)PIvh$Ku@ZB;(zHNV?goE>XZnm`M)ATXC0Q_ITN$IJvTIL*x|xvQCaJ^(W8h}VXlaWvg@22xxieqo!GDu8 z`+av|9BOGnBy@`yg~bz|whx5cfZBQj^q(gPBoK=*sw__bj@aK)5aLKmD2pY7LhanV z!ge*#e2vVpSa>Xp<4ld^KuC*T*XhMaBk!o+?5^2yJCt9i1C4b`s+DKsD*so_;o1s< zr)DFrJ1{RVTGJKw{eP@gwtmlUHry-O$Rv7peGV?^128TFJstE4ttQ5B3*yuJ=GEc9 zynK_eW+nr3+=q~22;0#)`Hn6Hxk2E`J-lH28id3UL=}Rd7}nvx1^lF7Dgc-ed5=<* zsQPdO_4K4JGWF=Jg^fSJ8t5=|y7s(iS~JVHWi`Sql4+IZ-haL@v{7ZQl)5|D1M&Qt zEE!_AgA?e1^Xi8nveAOjAA*P}Q9!>g)`#Cf!JjDFntCpUEnd{k^!#{BLa!Y6E=XK zlbr#bFeuKHa-;gnSW}{oHd#WE%ff9X3mr}M+0%nr?+-RNhVXdREpBL^Co1PGQr#-E zxbe9O8W%es6O6Qcur>`PRgOa!a+hPV$ui6X;wpKRMSr2ko-6f!^neUCH0Sp*Ng9wLMGI};p zGs4UQ)U<#x-BbJAVB-t8d?tJa(|~D3)9}mh@j;2uDW$6VYsuCyP6plp9EkJvYSTpN z+=-1Ku75z9?!oWuk>=`shJF-~cNeG0RqQNsaR77`u)?HMK3*^+a!Dy4R=;v_!@BAH z5}bJyy3hYNskZF}Mcw*t84Oj^cWLl~A$28Ul!fKG0+gAKUYo8-;!n!}0Qi|{%#8of z$C0X=V@Zvh+=wc8h=8PDa<$|q*lQorbcT&-5nngv7s`L zNKXzi=ha`58!=Bzk0< z*Jf@gJt7;4t_2>P5cBBf`?kZ%@ACY60CrX&1xn-nau-Z{x#O)fVjDi1I?5bKY>jWY=P2lqbwccvxNtG;W!lM z*=lfFdt)q^3R5{e%q$7etW(a1kpHIMr}D}ZHXQVN3d@o1P@2^HyEqp?=yP-jx!+-6m?4DV?Xu>KxJC;>Dg-eZ$& zxFSXBLfrb~;=o|LGQ9hI3&Vy$izB}(aZ{S`CYuTZlo3F=H?bZ8TYUP2JfsPKlc;Jf zpGcd#&~z#&Stn4LNQ@2mSBD56Cp1Lsl(Z8y zkgP_8W8F~X#%iM1q>#qb4rKm|5+cQ^Qdn|l5J`nM&wvuea+m1)H_wzXghrpp4WuFj z0>^MNXo-Ql06|1~t8kp0uu$Wa3YJ_aoDUkUR0Ipb1%xJ!7Xg%p*cEej4}Uo>D=vkv zniWjlaA}g?q;`&hNMgvT{;5UyRN>@iWuxhqxt9<`5Ly5G)F9I<@`cW@#z+#|xf%+l z1@@%|BTF=RLa{Q3C`7eCsC@uwQ+$#KW>!~t`tHH0gcXwgvHWHHi}joO?@H>GB!<@;`aL3_vsXx%zui+xm4M|phSGH%RHydJr zbSetWud?hzh*E%Z37bUOe$6VV@CE`$k5Ce{B}Q+aF_|bGNdA;12!C0N2%MB!!HCsJ z%pP7?AXcGrnJDH9WQg=f^UvQqpfn`GbSj3#Wuap3JWQJcPA;PYvJ0r;u>|P2LjXn4 zs>*`M9l(BFm^1UWN*aTxZ(LMZZwvIzTX z;HU-Fbm%OV#$hAJo>g}oL|9B2}Qy2KTo#=VagZqTRx+u zt!-RIkpgV2jV2m`ob7u`qk$OV-WLM}LF*042#e}!9IzgpF0~`4JThv1(bAPlIzpsc zOFuUer8eOai+@BBM8PJ&LSJDon*<*tr{7VW7z4G38{kb~} zBwq{&fN_^rzXv&?OEIFF06h@Fia9A0o_;qUKDeSsZ}C)E+}ZjN3RGjLDu(dH=pnF@ zVH1?Toj)CvSg)E`1N!$<-QbYmVfrj)uBZ@gfoik0N+?m7ztsq zNYtKP3B&-0lYRQT4JBAAE1y9EowX(l46Wck=2hFpQiCa?W6c_TO)Hr#mZ2j+JYv`1 zCN1lhsDDElL2dBp12nZHd1W$6A6qugsF6{flWT;oukph@8@mz_mAAkJ>m>&Ww)0CZ80E%oex~kNXHJnqK zxXz)o13Ji)FNzgp9ApDBkU%3wRR;Pp5t_-mBFNY@H(m2MGm9Sxi-#CwY|y~i1X!Ph zW~MSUz1nAv6Z|M85S)dv!xmc#eKnupBF6`~HMhtob{((T+ab{?{bg`b?S3NoR(}e1 zQM=!_{&8G+;WiTi$VVAjQlwuB7RFW45}TNY@}9`ukhrnY4sTG{fn!=Gi3*}L84$(- zQf7N#@#0V#XzGwVPQDAE5ba1PoEB8Z^t8qZH(rRP&XroL4Ugh(-&^y}5w)Cyiovo3 zc%3ejxnDEbiY-HgP%KvRK)Q~HE05He!=X`3yBTu|(ImV@ zrq7mPEI|X}BT^8C3yOfo0hDG}#AK0D>NYU&8Gu6pt>Tvn*=Y%{WEp1YrNY%zI!FPG zhpW#g;|XVsB2ct6)gVbZ*?;#{UY9_zB{D@Wu7tK|w*;mVMFI}czkdvZlxTApAGq2+3^L?JJaI|RlJOQsrgsXAk6vgV zff54QV@PDEi?K*W>{|;>`JZ$t!&*7p3q({sy-cBFPOVr<5b#9Hw11HI+2rXo434@Z zekC+|q)YDr{r=w9ZzU&$v%4wJQ<1MOoq065I4?|`nZ6vF2~+_HQY*7;mc8#GjomZOnaxr>dd^W-Il zV-WIdcnCH`EE8r}Reup^Tncxmq&dV=HDGAa@iKzkw#nUMq8QT!7){c`#oS_5Z1!V{JN}0it8B5ym zjwoc@Y|hgDhIKHX;^gl@Rn}e|ztr?!e!uHN0~Mii#KSIv5r3H~XwyY|0Qy)DNtfiT zAk`m0NTIBa+?_>_ZVXW{gGen3scW^rZ zwPCJLg17)hrhY22P0gx;9bC7miQnSEo4QBYM_oQ_c~0}dVUk3%N=YR`EJp$d(JagR z=S$E3&A2nfbbq1qGMBY90ljS3M?mG=>j8VBqm_oP(gSN8nW6ar814XMKhhlGBrd8D zLqz|jj4+X;xd5`1H)J|nt^3sOgFX&@jl3HS&lD+N=#suO%aUFE37V~20~qj#)15b< zpN|d<1L*7{JIg-!1yE1V=Vk{Dgku%%=Ikb@4`By&Y=8BNXb3&8cNY`XA^2FC#gjmH z&@bY8>Ro|^Sz&h)!CR_y&}R6Tmn5pSMd-y`&@Pn2E!1{b%^{1=0D_TT+)DfSkApHc 
z9v}2o{H}q+X>OYT*v)8v(ek50P|7_cl7~d(i#giA5yaQJ`WNRsLGm;5Ka4b5uds) zrFDfTGu_6@i3>W2ZxI6`;gVWsKdL3{>1f#>jen7|&&+IMo(eq&4Y0?Zg@N!yFMaH8N+g*o(IpOb&an_UewQh))It~x`Yz*LGJ3s;rcaW zhoC~T$Xbb5f?}%~wTvg)Zc%A4lQV{4ZcHchb;V^{1_$XpNNpu9FSwmCOEBwg=($_k zsek(n7BlFUJ3U-L={k65Ab@7VO+GP|Q7?5E07DUIw9VYqV zkuyE^A=tK?H;8%2nLZ5p5#NfG$vWOwFMll;9I+KqHgQ)hS4D33306$JUrrf`d|7MR zgIdgbpo3KTZY)qdICzTa-#M+OLHkJ+NwL5li!EX(JgjN*$1X{sI{>4{4(4qDNr+yi z54F%QFdP8M@qmzoecOV2P=G}@E}zNtGkecStMFT1U_45|FH^V1#1#LWQp+K-1b;@Z zQiZTP{W#6}Fn3Q4i|U>hRW6F(0B7bSRHw{56UnW$qOs<00E@jqU%r+_;s3O!PLzcX zN4ZRwqBZ;L)#gAk3r4%x z1N2BHOp_$62`1Cv5!@o6Fwo~SQhzD?D9#NM5u&qQyl@FB2C8&A7ts$qp|l^OtudY^ zz;`DfyA6U<;Pd?t229m`FgH!6wg_(WR(uIfT$!b zZ8GCp}j)x%IQ=%nVX;DMJ-&K4K zkS1q&;Z`1`%3KR!TXl${(0{2DHU6zv2n^kp>GFd3qy&Sjr9i^KlG5)zrRcsrgLDNs zC|VWeDhoWORj8*#qMEU4DUu5vTxu7w3#~)k^+GS@baR?~RfTrsmwHu98`nibt|I5| z3wmICI4KOFjiFl4aVXG5xTpsrbw!}NWgG=XlFSy&5L>z$X21oYaDOR`fJ@U%p;D35 z!A-J3k}GUS72A{~+7u;_)vTtaHP-Abwu__-TD3qbS>Fez?B30$6#I)%n`Xq#7BL1! zsdND{5r9c;ZT1DF=$h(&qtH$G6Q*}y3wTO{4kIA6-T45FMRG_obnTx;=`Onx0E1Rg zMQnm9PX*A)syh$|%YPJMX*|HaC@PjBQbvFFC+x(2p;b>RSEgl+fpFS}HcJwGz@5;b zb~3?xv(7Fy9`R~Uaj)+Q+y**H&*RT|4rdv?_)P2>E~M&&{J-Jq5Rf{T2VD2Ope(ws z*6#pl2tka}bI z93p-b4jOdcMJ8Awbp<7ga+H*)MZyu(LFq6ubc7ZNbCUu_0x1wSML5Cnp^p&rV?7Rr z8xZNnYA_(+qkn3Q3^uUfqaFm;iW(!dSOGePNE2v8ktxLm5~d;GlH&3f09G*GLqG@^ zHz4$(@CJY$08@b+gJTYmGyuW}kRy;=K!{;u1bh?BCxEa(ZGwaWFgFE|4^S0wHh>)f z^9GO23QUOE#m*M3NUTBV<<%)<%j{bIspHHDt{TMy?5g9I9LI7fa_EO-(JX|!20z{Z~ z5($uoWvl?=rTB3ud%)7_oVjHl;4?%+`L4wBvVDEqdw!i}IKz{=|fO+Zx*_Z~-U>X^KZsY;Ahz5Qj z7+8Q{;sIlb1#SQoSO7_20S|BkX3z!QuoZSdO<4fXWCERV301%v)B*VUk~S}*G7OB6 z9~?(P3hG23FL4`UhoQC}hLCC-gl&sV#yt}l(@b4VjinPiFH%7991d|n-xS8^rZqz` zrhgfXkj!L;Vh!1chh`idm}hp_yyOL^AS^jBoZP^#>H;sQetqXpzHj^Hw~FScBRldV zuN~T)bK-+%6c_lQw$@=?sDgVi64`*_#0J(NG%o>xcneFwP+kIZ@DLY(bhHB1pku28 z9Y_}HK(kT>Vvr@&ff}3$l;Axh0oe!;NL)a9LIbJr8_$5!Yz7lxE|~#T$OFueU}5+s2qhr)hYx;X|l - - + + - - + + - - - - - - - - - + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.ttf old mode 100755 new mode 100644 index be784dc1d5bcb92ab155f578f3723524a3dd9688..1413fc609ab6f21774de0cb7e01360095584f65b GIT binary patch literal 45404 zcmd?Sd0-pWwLh*qi$?oCk~i6sWlOeWJC3|4juU5JNSu9hSVACzERcmjLV&P^utNzg zIE4Kr1=5g!SxTX#Ern9_%4&01rlrW`Z!56xXTGQR4C z3vR~wXq>NDx$c~e?;ia3YjJ*$!C>69a?2$lLyhpI!CFfJsP=|`8@K0|bbMpWwVUEygg0=0x_)HeHpGSJagJNLA3c!$EuOV>j$wi! 
zbo{vZ(s8tl>@!?}dmNHXo)ABy7ohD7_1G-P@SdJWT8*oeyBVYVW9*vn}&VI4q++W;Z+uz=QTK}^C75!`aFYCX# zf7fC2;o`%!huaTNJAB&VWrx=szU=VLhwnbT`vc<#<`4WI6n_x@AofA~2d90o?1L3w z9!I|#P*NQ)$#9aASijuw>JRld^-t)Zhmy|i-`Iam|IWkguaMR%lhi4p~cX-9& zjfbx}yz}s`4-6>D^+6FzihR)Y!GsUy=_MWi_v7y#KmYi-{iZ+s@ekkq!@Wxz!~BQwiI&ti z>hC&iBe2m(dpNVvSbZe3DVgl(dxHt-k@{xv;&`^c8GJY%&^LpM;}7)B;5Qg5J^E${ z7z~k8eWOucjX6)7q1a%EVtmnND8cclz8R1=X4W@D8IDeUGXxEWe&p>Z*voO0u_2!! zj3dT(Ki+4E;uykKi*yr?w6!BW2FD55PD6SMj`OfBLwXL5EA-9KjpMo4*5Eqs^>4&> z8PezAcn!9jk-h-Oo!E9EjX8W6@EkTHeI<@AY{f|5fMW<-Ez-z)xCvW3()Z#x0oydB zzm4MzY^NdpIF9qMp-jU;99LjlgY@@s+=z`}_%V*xV7nRV*Kwrx-i`FzI0BZ#yOI8# z!SDeNA5b6u9!Imj89v0(g$;dT_y|Yz!3V`i{{_dez8U@##|X9A};s^7vEd!3AcdyVlhVk$v?$O442KIM1-wX^R{U7`JW&lPr3N(%kXfXT_`7w^? z=#ntx`tTF|N$UT?pELvw7T*2;=Q-x@KmDUIbLyXZ>f5=y7z1DT<7>Bp0k;eItHF?1 zErzhlD2B$Tm|^7DrxnTYm-tgg`Mt4Eivp5{r$o9e)8(fXBO4g|G^6Xy?y$SM*&V52 z6SR*%`%DZC^w(gOWQL?6DRoI*hBNT)xW9sxvmi@!vI^!mI$3kvAMmR_q#SGn3zRb_ zGe$=;Tv3dXN~9XuIHow*NEU4y&u}FcZEZoSlXb9IBOA}!@J3uovp}yerhPMaiI8|SDhvWVr z^BE&yx6e3&RYqIg;mYVZ*3#A-cDJ;#ms4txEmwm@g^s`BB}KmSr7K+ruIoKs=s|gOXP|2 zb1!)87h9?(+1^QRWb(Vo8+@G=o24gyuzF3ytfsKjTHZJ}o{YznGcTDm!s)DRnmOX} z3pPL4wExoN$kyc2>#J`k+<67sy-VsfbQ-1u+HkyFR?9G`9r6g4*8!(!c65Be-5hUg zZHY$M0k(Yd+DT1*8)G(q)1&tDl=g9H7!bZTOvEEFnBOk_K=DXF(d4JOaH zI}*A3jGmy{gR>s}EQzyJa_q_?TYPNXRU1O;fcV_&TQZhd{@*8Tgpraf~nT0BYktu*n{a~ub^UUqQPyr~yBY{k2O zgV)honv{B_CqY|*S~3up%Wn%7i*_>Lu|%5~j)}rQLT1ZN?5%QN`LTJ}vA!EE=1`So z!$$Mv?6T)xk)H8JTrZ~m)oNXxS}pwPd#);<*>zWsYoL6iK!gRSBB{JCgB28C#E{T? z5VOCMW^;h~eMke(w6vLlKvm!!TyIf;k*RtK)|Q>_@nY#J%=h%aVb)?Ni_By)XNxY)E3`|}_u}fn+Kp^3p4RbhFUBRtGsDyx9Eolg77iWN z2iH-}CiM!pfYDIn7;i#Ui1KG01{3D<{e}uWTdlX4Vr*nsb^>l0%{O?0L9tP|KGw8w z+T5F}md>3qDZQ_IVkQ|BzuN08uN?SsVt$~wcHO4pB9~ykFTJO3g<4X({-Tm1w{Ufo zI03<6KK`ZjqVyQ(>{_aMxu7Zm^ck&~)Q84MOsQ-XS~{6j>0lTl@lMtfWjj;PT{nlZ zIn0YL?kK7CYJa)(8?unZ)j8L(O}%$5S#lTcq{rr5_gqqtZ@*0Yw4}OdjL*kBv+>+@ z&*24U=y{Nl58qJyW1vTwqsvs=VRAzojm&V zEn6=WzdL1y+^}%Vg!ap>x%%nFi=V#wn# zUuheBR@*KS)5Mn0`f=3fMwR|#-rPMQJg(fW*5e`7xO&^UUH{L(U8D$JtI!ac!g(Ze89<`UiO@L+)^D zjPk2_Ie0p~4|LiI?-+pHXuRaZKG$%zVT0jn!yTvvM^jlcp`|VSHRt-G@_&~<4&qW@ z?b#zIN)G(}L|60jer*P7#KCu*Af;{mpWWvYK$@Squ|n-Vtfgr@ZOmR5Xpl;0q~VILmjk$$mgp+`<2jP z@+nW5Oap%fF4nFwnVwR7rpFaOdmnfB$-rkO6T3#w^|*rft~acgCP|ZkgA6PHD#Of| zY%E!3tXtsWS`udLsE7cSE8g@p$ceu*tI71V31uA7jwmXUCT7+Cu3uv|W>ZwD{&O4Nfjjvl43N#A$|FWxId! 
z%=X!HSiQ-#4nS&smww~iXRn<-`&zc)nR~js?|Ei-cei$^$KsqtxNDZvl1oavXK#Pz zT&%Wln^Y5M95w=vJxj0a-ko_iQt(LTX_5x#*QfQLtPil;kkR|kz}`*xHiLWr35ajx zHRL-QQv$|PK-$ges|NHw8k6v?&d;{A$*q15hz9{}-`e6ys1EQ1oNNKDFGQ0xA!x^( zkG*-ueZT(GukSnK&Bs=4+w|(kuWs5V_2#3`!;f}q?>xU5IgoMl^DNf+Xd<=sl2XvkqviJ>d?+G@Z5nxxd5Sqd$*ENUB_mb8Z+7CyyU zA6mDQ&e+S~w49csl*UePzY;^K)Fbs^%?7;+hFc(xz#mWoek4_&QvmT7Fe)*{h-9R4 zqyXuN5{)HdQ6yVi#tRUO#M%;pL>rQxN~6yoZ)*{{!?jU)RD*oOxDoTjVh6iNmhWNC zB5_{R=o{qvxEvi(khbRS`FOXmOO|&Dj$&~>*oo)bZz%lPhEA@ zQ;;w5eu5^%i;)w?T&*=UaK?*|U3~{0tC`rvfEsRPgR~16;~{_S2&=E{fE2=c>{+y} zx1*NTv-*zO^px5TA|B```#NetKg`19O!BK*-#~wDM@KEllk^nfQ2quy25G%)l72<> zzL$^{DDM#jKt?<>m;!?E2p0l12`j+QJjr{Lx*47Nq(v6i3M&*P{jkZB{xR?NOSPN% zU>I+~d_ny=pX??qjF*E78>}Mgts@_yn`)C`wN-He_!OyE+gRI?-a>Om>Vh~3OX5+& z6MX*d1`SkdXwvb7KH&=31RCC|&H!aA1g_=ZY0hP)-Wm6?A7SG0*|$mC7N^SSBh@MG z9?V0tv_sE>X==yV{)^LsygK2=$Mo_0N!JCOU?r}rmWdHD%$h~~G3;bt`lH& zAuOOZ=G1Mih**0>lB5x+r)X^8mz!0K{SScj4|a=s^VhUEp#2M=^#WRqe?T&H9GnWa zYOq{+gBn9Q0e0*Zu>C(BAX=I-Af9wIFhCW6_>TsIH$d>|{fIrs&BX?2G>GvFc=<8` zVJ`#^knMU~65dWGgXcht`Kb>{V2oo%<{NK|iH+R^|Gx%q+env#Js*(EBT3V0=w4F@W+oLFsA)l7Qy8mx_;6Vrk;F2RjKFvmeq} zro&>@b^(?f))OoQ#^#s)tRL>b0gzhRYRG}EU%wr9GjQ#~Rpo|RSkeik^p9x2+=rUr}vfnQoeFAlv=oX%YqbLpvyvcZ3l$B z5bo;hDd(fjT;9o7g9xUg3|#?wU2#BJ0G&W1#wn?mfNR{O7bq747tc~mM%m%t+7YN}^tMa24O4@w<|$lk@pGx!;%pKiq&mZB z?3h<&w>un8r?Xua6(@Txu~Za9tI@|C4#!dmHMzDF_-_~Jolztm=e)@vG11bZQAs!tFvd9{C;oxC7VfWq377Y(LR^X_TyX9bn$)I765l=rJ%9uXcjggX*r?u zk|0!db_*1$&i8>d&G3C}A`{Fun_1J;Vx0gk7P_}8KBZDowr*8$@X?W6v^LYmNWI)lN92yQ;tDpN zOUdS-W4JZUjwF-X#w0r;97;i(l}ZZT$DRd4u#?pf^e2yaFo zbm>I@5}#8FjsmigM8w_f#m4fEP~r~_?OWB%SGWcn$ThnJ@Y`ZI-O&Qs#Y14To( zWAl>9Gw7#}eT(!c%D0m>5D8**a@h;sLW=6_AsT5v1Sd_T-C4pgu_kvc?7+X&n_fct znkHy(_LExh=N%o3I-q#f$F4QJpy>jZBW zRF7?EhqTGk)w&Koi}QQY3sVh?@e-Z3C9)P!(hMhxmXLC zF_+ZSTQU`Gqx@o(~B$dbr zHlEUKoK&`2gl>zKXlEi8w6}`X3kh3as1~sX5@^`X_nYl}hlbpeeVlj#2sv)CIMe%b zBs7f|37f8qq}gA~Is9gj&=te^wN8ma?;vF)7gce;&sZ64!7LqpR!fy)?4cEZposQ8 zf;rZF7Q>YMF1~eQ|Z*!5j0DuA=`~VG$Gg6B?Om1 z6fM@`Ck-K*k(eJ)Kvysb8sccsFf@7~3vfnC=<$q+VNv)FyVh6ZsWw}*vs>%k3$)9| zR9ek-@pA23qswe1io)(Vz!vS1o*XEN*LhVYOq#T`;rDkgt86T@O`23xW~;W_#ZS|x zvwx-XMb7_!hIte-#JNpFxskMMpo2OYhHRr0Yn8d^(jh3-+!CNs0K2B!1dL$9UuAD= zQ%7Ae(Y@}%Cd~!`h|wAdm$2WoZ(iA1(a_-1?znZ%8h72o&Mm*4x8Ta<4++;Yr6|}u zW8$p&izhdqF=m8$)HyS2J6cKyo;Yvb>DTfx4`4R{ zPSODe9E|uflE<`xTO=r>u~u=NuyB&H!(2a8vwh!jP!yfE3N>IiO1jI>7e&3rR#RO3_}G23W?gwDHgSgekzQ^PU&G5z&}V5GO? 
zfg#*72*$DP1T8i`S7=P;bQ8lYF9_@8^C(|;9v8ZaK2GnWz4$Th2a0$)XTiaxNWfdq z;yNi9veH!j)ba$9pke8`y2^63BP zIyYKj^7;2don3se!P&%I2jzFf|LA&tQ=NDs{r9fIi-F{-yiG-}@2`VR^-LIFN8BC4 z&?*IvLiGHH5>NY(Z^CL_A;yISNdq58}=u~9!Ia7 zm7MkDiK~lsfLpvmPMo!0$keA$`%Tm`>Fx9JpG^EfEb(;}%5}B4Dw!O3BCkf$$W-dF z$BupUPgLpHvr<<+QcNX*w@+Rz&VQz)Uh!j4|DYeKm5IC05T$KqVV3Y|MSXom+Jn8c zgUEaFW1McGi^44xoG*b0JWE4T`vka7qTo#dcS4RauUpE{O!ZQ?r=-MlY#;VBzhHGU zS@kCaZ*H73XX6~HtHd*4qr2h}Pf0Re@!WOyvres_9l2!AhPiV$@O2sX>$21)-3i+_ z*sHO4Ika^!&2utZ@5%VbpH(m2wE3qOPn-I5Tbnt&yn9{k*eMr3^u6zG-~PSr(w$p> zw)x^a*8Ru$PE+{&)%VQUvAKKiWiwvc{`|GqK2K|ZMy^Tv3g|zENL86z7i<c zW`W>zV1u}X%P;Ajn+>A)2iXZbJ5YB_r>K-h5g^N=LkN^h0Y6dPFfSBh(L`G$D%7c` z&0RXDv$}c7#w*7!x^LUes_|V*=bd&aP+KFi((tG*gakSR+FA26%{QJdB5G1F=UuU&koU*^zQA=cEN9}Vd?OEh| zgzbFf1?@LlPkcXH$;YZe`WEJ3si6&R2MRb}LYK&zK9WRD=kY-JMPUurX-t4(Wy{%` zZ@0WM2+IqPa9D(^*+MXw2NWwSX-_WdF0nMWpEhAyotIgqu5Y$wA=zfuXJ0Y2lL3#ji26-P3Z?-&0^KBc*`T$+8+cqp`%g0WB zTH9L)FZ&t073H4?t=(U6{8B+uRW_J_n*vW|p`DugT^3xe8Tomh^d}0k^G7$3wLgP& zn)vTWiMA&=bR8lX9H=uh4G04R6>C&Zjnx_f@MMY!6HK5v$T%vaFm;E8q=`w2Y}ucJ zkz~dKGqv9$E80NTtnx|Rf_)|3wxpnY6nh3U9<)fv2-vhQ6v=WhKO@~@X57N-`7Ppc zF;I7)eL?RN23FmGh0s;Z#+p)}-TgTJE%&>{W+}C`^-sy{gTm<$>rR z-X7F%MB9Sf%6o7A%ZHReD4R;imU6<9h81{%avv}hqugeaf=~^3A=x(Om6Lku-Pn9i zC;LP%Q7Xw*0`Kg1)X~nAsUfdV%HWrpr8dZRpd-#%)c#Fu^mqo|^b{9Mam`^Zw_@j@ zR&ZdBr3?@<@%4Z-%LT&RLgDUFs4a(CTah_5x4X`xDRugi#vI-cw*^{ncwMtA4NKjByYBza)Y$hozZCpuxL{IP&=tw6ZO52WY3|iwGf&IJCn+u(>icK zZB1~bWXCmwAUz|^<&ysd#*!DSp8}DLNbl5lRFat4NkvItxy;9tpp9~|@ z;JctShv^Iq4(z+y7^j&I?GCdKMVg&jCwtCkc4*@O7HY*veGDBtAIn*JgD$QftP}8= zxFAdF=(S>Ra6(4slk#h%b?EOU-96TIX$Jbfl*_7IY-|R%H zF8u|~hYS-YwWt5+^!uGcnKL~jM;)ObZ#q68ZkA?}CzV-%6_vPIdzh_wHT_$mM%vws9lxUj;E@#1UX?WO2R^41(X!nk$+2oJGr!sgcbn1f^yl1 z#pbPB&Bf;1&2+?};Jg5qgD1{4_|%X#s48rOLE!vx3@ktstyBsDQWwDz4GYlcgu$UJ zp|z_32yN72T*oT$SF8<}>e;FN^X&vWNCz>b2W0rwK#<1#kbV)Cf`vN-F$&knLo5T& z8!sO-*^x4=kJ$L&*h%rQ@49l?7_9IG99~xJDDil00<${~D&;kiqRQqeW5*22A`8I2 z(^@`qZoF7_`CO_e;8#qF!&g>UY;wD5MxWU>azoo=E{kW(GU#pbOi%XAn%?W{b>-bTt&2?G=E&BnK9m0zs{qr$*&g8afR_x`B~o zd#dxPpaap;I=>1j8=9Oj)i}s@V}oXhP*{R|@DAQXzQJekJnmuQ;vL90_)H_nD1g6e zS1H#dzg)U&6$fz0g%|jxDdz|FQN{KJ&Yx0vfuzAFewJjv`pdMRpY-wU`-Y6WQnJ(@ zGVb!-8DRJZvHnRFiR3PG3Tu^nCn(CcZHh7hQvyd7i6Q3&ot86XI{jo%WZqCPcTR0< zMRg$ZE=PQx66ovJDvI_JChN~k@L^Pyxv#?X^<)-TS5gk`M~d<~j%!UOWG;ZMi1af< z+86U0=sm!qAVJAIqqU`Qs1uJhQJA&n@9F1PUrYuW!-~IT>l$I!#5dBaiAK}RUufjg{$#GdQBkxF1=KU2E@N=i^;xgG2Y4|{H>s` z$t`k8c-8`fS7Yfb1FM#)vPKVE4Uf(Pk&%HLe z%^4L>@Z^9Z{ZOX<^e)~adVRkKJDanJ6VBC_m@6qUq_WF@Epw>AYqf%r6qDzQ~AEJ!jtUvLp^CcqZ^G-;Kz3T;O4WG45Z zFhrluCxlY`M+OKr2SeI697btH7Kj`O>A!+2DTEQ=48cR>Gg2^5uqp(+y5Sl09MRl* zp|28!v*wvMd_~e2DdKDMMQ|({HMn3D%%ATEecGG8V9>`JeL)T0KG}=}6K8NiSN5W< z79-ZdYWRUb`T}(b{RjN8>?M~opnSRl$$^gT`B27kMym5LNHu-k;A;VF8R(HtDYJHS zU7;L{a@`>jd0svOYKbwzq+pWSC(C~SPgG~nWR3pBA8@OICK$Cy#U`kS$I;?|^-SBC zBFkoO8Z^%8Fc-@X!KebF2Ob3%`8zlVHj6H;^(m7J35(_bS;cZPd}TY~qixY{MhykQ zV&7u7s%E=?i`}Ax-7dB0ih47w*7!@GBt<*7ImM|_mYS|9_K7CH+i}?*#o~a&tF-?C zlynEu1DmiAbGurEX2Flfy$wEVk7AU;`k#=IQE*6DMWafTL|9-vT0qs{A3mmZGzOyN zcM9#Rgo7WgB_ujU+?Q@Ql?V-!E=jbypS+*chI&zA+C_3_@aJal}!Q54?qsL0In({Ly zjH;e+_SK8yi0NQB%TO+Dl77jp#2pMGtwsgaC>K!)NimXG3;m7y`W+&<(ZaV>N*K$j zLL~I+6ouPk6_(iO>61cIsinx`5}DcKSaHjYkkMuDoVl>mKO<4$F<>YJ5J9A2Vl}#BP7+u~L8C6~D zsk`pZ$9Bz3teQS1Wb|8&c2SZ;qo<#F&gS;j`!~!ADr(jJXMtcDJ9cVi>&p3~{bqaP zgo%s8i+8V{UrYTc9)HiUR_c?cfx{Yan2#%PqJ{%?Wux4J;T$#cumM0{Es3@$>}DJg zqe*c8##t;X(4$?A`ve)e@YU3d2Balcivot{1(ahlE5qg@S-h(mPNH&`pBX$_~HdG48~)$x5p z{>ghzqqn_t8~pY<5?-To>cy^6o~mifr;KWvx_oMtXOw$$d6jddXG)V@a#lL4o%N@A zNJlQAz6R8{7jax-kQsH6JU_u*En%k^NHlvBB!$JAK!cYmS)HkLAkm0*9G3!vwMIWv 
zo#)+EamIJHEUV|$d|<)2iJ`lqBQLx;HgD}c3mRu{iK23C>G{0Mp1K)bt6OU?xC4!_ zZLqpFzeu&+>O1F>%g-%U^~yRg(-wSp@vmD-PT#bCWy!%&H;qT7rfuRCEgw67V!Qob z&tvPU@*4*$YF#2_>M0(75QxqrJr3Tvh~iDeFhxl=MzV@(psx%G8|I{~9;tv#BBE`l z3)_98eZqFNwEF1h)uqhBmT~mSmT8k$7vSHdR97K~kM)P9PuZdS;|Op4A?O<*%!?h` zn`}r_j%xvffs46x2hCWuo0BfIQWCw9aKkH==#B(TJ%p}p-RuIVzsRlaPL_Co{&R0h zQrqn=g1PGjQg3&sc2IlKG0Io#v%@p>tFwF)RG0ahYs@Zng6}M*d}Xua)+h&?$`%rb z;>M=iMh5eIHuJ5c$aC`y@CYjbFsJnSPH&}LQz4}za9YjDuao>Z^EdL@%saRm&LGQWXs*;FzwN#pH&j~SLhDZ+QzhplV_ij(NyMl z;v|}amvxRddO81LJFa~2QFUs z+Lk zZck)}9uK^buJNMo4G(rSdX{57(7&n=Q6$QZ@lIO9#<3pA2ceDpO_340B*pHlh_y{>i&c1?vdpN1j>3UN-;;Yq?P+V5oY`4Z(|P8SwWq<)n`W@AwcQ?E9 zd5j8>FT^m=MHEWfN9jS}UHHsU`&SScib$qd0i=ky0>4dz5ADy70AeIuSzw#gHhQ_c zOp1!v6qU)@8MY+ zMNIID?(CysRc2uZQ$l*QZVY)$X?@4$VT^>djbugLQJdm^P>?51#lXBkdXglYm|4{L zL%Sr?2f`J+xrcN@=0tiJt(<-=+v>tHy{XaGj7^cA6felUn_KPa?V4ebfq7~4i~GKE zpm)e@1=E;PP%?`vK6KVPKXjUXyLS1^NbnQ&?z>epHCd+J$ktT1G&L~T)nQeExe;0Z zlei}<_ni ztFo}j7nBl$)s_3odmdafVieFxc)m!wM+U`2u%yhJ90giFcU1`dR6BBTKc2cQ*d zm-{?M&%(={xYHy?VCx!ogr|4g5;V{2q(L?QzJGsirn~kWHU`l`rHiIrc-Nan!hR7zaLsPr4uR zG{En&gaRK&B@lyWV@yfFpD_^&z>84~_0Rd!v(Nr%PJhFF_ci3D#ixf|(r@$igZiWw za*qbXIJ_Hm4)TaQ=zW^g)FC6uvyO~Hg-#Z5Vsrybz6uOTF>Rq1($JS`imyNB7myWWpxYL(t7`H8*voI3Qz6mvm z$JxtArLJ(1wlCO_te?L{>8YPzQ})xJlvc5wv8p7Z=HviPYB#^#_vGO#*`<0r%MR#u zN_mV4vaBb2RwtoOYCw)X^>r{2a0kK|WyEYoBjGxcObFl&P*??)WEWKU*V~zG5o=s@ z;rc~uuQQf9wf)MYWsWgPR!wKGt6q;^8!cD_vxrG8GMoFGOVV=(J3w6Xk;}i)9(7*U zwR4VkP_5Zx7wqn8%M8uDj4f1aP+vh1Wue&ry@h|wuN(D2W;v6b1^ z`)7XBZ385zg;}&Pt@?dunQ=RduGRJn^9HLU&HaeUE_cA1{+oSIjmj3z+1YiOGiu-H zf8u-oVnG%KfhB8H?cg%@#V5n+L$MO2F4>XoBjBeX>css^h}Omu#)ExTfUE^07KOQS znMfQY2wz?!7!{*C^)aZ^UhMZf=TJNDv8VrrW;JJ9`=|L0`w9DE8MS>+o{f#{7}B4P z{I34>342vLsP}o=ny1eZkEabr@niT5J2AhByUz&i3Ck0H*H`LRHz;>3C_ru!X+EhJ z6(+(lI#4c`2{`q0o9aZhI|jRjBZOV~IA_km7ItNtUa(Wsr*Hmb;b4=;R(gF@GmsRI`pF+0tmq0zy~wnoJD(LSEwHjTOt4xb0XB-+ z&4RO{Snw4G%gS9w#uSUK$Zbb#=jxEl;}6&!b-rSY$0M4pftat-$Q)*y!bpx)R%P>8 zrB&`YEX2%+s#lFCIV;cUFUTIR$Gn2%F(3yLeiG8eG8&)+cpBlzx4)sK?>uIlH+$?2 z9q9wk5zY-xr_fzFSGxYp^KSY0s%1BhsI>ai2VAc8&JiwQ>3RRk?ITx!t~r45qsMnj zkX4bl06ojFCMq<9l*4NHMAtIxDJOX)H=K*$NkkNG<^nl46 zHWH1GXb?Og1f0S+8-((5yaeegCT62&4N*pNQY;%asz9r9Lfr;@Bl${1@a4QAvMLbV6JDp>8SO^q1)#(o%k!QiRSd0eTmzC< zNIFWY5?)+JTl1Roi=nS4%@5iF+%XztpR^BSuM~DX9q`;Mv=+$M+GgE$_>o+~$#?*y zAcD4nd~L~EsAjXV-+li6Lua4;(EFdi|M2qV53`^4|7gR8AJI;0Xb6QGLaYl1zr&eu zH_vFUt+Ouf4SXA~ z&Hh8K@ms^`(hJfdicecj>J^Aqd00^ccqN!-f-!=N7C1?`4J+`_f^nV!B3Q^|fuU)7 z1NDNT04hd4QqE+qBP+>ZE7{v;n3OGN`->|lHjNL5w40pePJ?^Y6bFk@^k%^5CXZ<+4qbOplxpe)l7c6m%o-l1oWmCx%c6@rx85hi(F=v(2 zJ$jN>?yPgU#DnbDXPkHLeQwED5)W5sH#-eS z%#^4dxiVs{+q(Yd^ShMN3GH)!h!@W&N`$L!SbElXCuvnqh{U7lcCvHI#{ZjwnKvu~ zAeo7Pqot+Ohm{8|RJsTr3J4GjCy5UTo_u_~p)MS&Z5UrUc|+;Mc(YS+ju|m3Y_Dvt zonVtpBWlM718YwaN3a3wUNqX;7TqvAFnVUoD5v5WTh~}r)KoLUDw%8Rrqso~bJqd> z_T!&Rmr6ebpV^4|knJZ%qmzL;OvG3~A*loGY7?YS%hS{2R0%NQ@fRoEK52Aiu%gj( z_7~a}eQUh8PnyI^J!>pxB(x7FeINHHC4zLDT`&C*XUpp@s0_B^!k5Uu)^j_uuu^T> z8WW!QK0SgwFHTA%M!L`bl3hHjPp)|wL5Var_*A1-H8LV?uY5&ou{hRjj>#X@rxV>5%-9hbP+v?$4}3EfoRH;l_wSiz{&1<+`Y5%o%q~4rdpRF0jOsCoLnWY5x?V)0ga>CDo`NpqS) z@x`mh1QGkx;f)p-n^*g5M^zRTHz%b2IkLBY{F+HsjrFC9_H(=9Z5W&Eymh~A_FUJ} znhTc9KG((OnjFO=+q>JQZJbeOoUM77M{)$)qQMcxK9f;=L;IOv_J>*~w^YOW744QZ zoG;!b9VD3ww}OX<8sZ0F##8hvfDP{hpa3HjaLsKbLJ8 z0WpY2E!w?&cWi7&N%bOMZD~o7QT*$xCRJ@{t31~qx~+0yYrLXubXh2{_L699Nl_pn z6)9eu+uUTUdjHXYs#pX^L)AIb!FjjNsTp7C399w&B{Q4q%yKfmy}T2uQdU|1EpNcY zDk~(h#AdxybjfzB+mg6rdU9mDZ^V>|U13Dl$Gj+pAL}lR2a1u!SJXU_YqP9N{ose4 zk+$v}BIHX60WSGVWv;S%zvHOWdDP(-ceo(<8`y@Goy%4wDu>57QZNJc)f>Ls+}9h7 
z^N=#3q3|l?aG8K#HwiW2^PJu{v|x5;awYfahC?>_af3$LmMc4%N~JwVlRZa4c+eW2 zE!zosAjOv&UeCeu;Bn5OQUC=jtZjF;NDk9$fGbxf3d29SUBekX1!a$Vmq_VK*MHQ4)eB!dQrHH)LVYNF%-t8!d`@!cb z2CsKs3|!}T^7fSZm?0dJ^JE`ZGxA&a!jC<>6_y67On0M)hd$m*RAzo_qM?aeqkm`* zXpDYcc_>TFZYaC3JV>{>mp(5H^efu!Waa7hGTAts29jjuVd1vI*fEeB?A&uG<8dLZ z(j6;-%vJ7R0U9}XkH)1g>&uptXPHBEA*7PSO2TZ+dbhVxspNW~ZQT3fApz}2 z_@0-lZODcd>dLrYp!mHn4k>>7kibI!Em+Vh*;z}l?0qro=aJt68joCr5Jo(Vk<@i) z5BCKb4p6Gdr9=JSf(2Mgr=_6}%4?SwhV+JZj3Ox^_^OrQk$B^v?eNz}d^xRaz&~ zKVnlLnK#8^y=If2f1zmb~^5lPLe?%l}>?~wN4IN((2~U{e9fKhLMtYFj)I$(y zgnKv?R+ZpxA$f)Q2l=aqE6EPTK=i0sY&MDFJp!vQayyvzh4wee<}kybNthRlX>SHh z7S}9he^EBOqzBCww^duHu!u+dnf9veG{HjW!}aT7aJqzze9K6-Z~8pZAgdm1n~aDs z8_s7?WXMPJ3EPJHi}NL&d;lZP8hDhAXf5Hd!x|^kEHu`6QukXrVdLnq5zbI~oPo?7 z2Cbu8U?$K!Z4_yNM1a(bL!GRe!@{Qom+DxjrJ!B99qu5b*Ma%^&-=6UEbC+S2zX&= zQ!%bgJTvmv^2}hhvNQg!l=kbapAgM^hruE3k@jTxsG(B6d=4thBC*4tzVpCYXFc$a zeqgVB^zua)y-YjpiibCCdU%txXYeNFnXcbNj*D?~)5AGjL+!!ij_4{5EWKGav0^={~M^q}baAFOPzxfUM>`KPf|G z&hsaR*7(M6KzTj8Z?;45zX@L#xU{4n$9Q_<-ac(y4g~S|Hyp^-<*d8+P4NHe?~vfm z@y309=`lGdvN8*jw-CL<;o#DKc-%lb0i9a3%{v&2X($|Qxv(_*()&=xD=5oBg=$B0 zU?41h9)JKvP0yR{KsHoC>&`(Uz>?_`tlLjw1&5tPH3FoB%}j;yffm$$s$C=RHi`I3*m@%CPqWnP@B~%DEe;7ZT{9!IMTo1hT3Q347HJ&!)BM2 z3~aClf>aFh0_9||4G}(Npu`9xYY1*SD|M~9!CCFn{-J$u2&Dg*=5$_nozpoD2nxqq zB!--eA8UWZlcEDp4r#vhZ6|vq^9sFvRnA9HpHch5Mq4*T)oGbruj!U8Lx_G%Lby}o zTQ-_4A7b)5A42vA0U}hUJq6&wQ0J%$`w#ph!EGmW96)@{AUx>q6E>-r^Emk!iCR+X zdIaNH`$}7%57D1FyTccs3}Aq0<0Ei{`=S7*>pyg=Kv3nrqblqZcpsCWSQl^uMSsdj zYzh73?6th$c~CI0>%5@!Ej`o)Xm38u0fp9=HE@Sa6l2oX9^^4|Aq%GA z3(AbFR9gA_2T2i%Ck5V2Q2WW-(a&(j#@l6wE4Z`xg#S za#-UWUpU2U!TmIo`CN0JwG^>{+V#9;zvx;ztc$}@NlcyJr?q(Y`UdW6qhq!aWyB5xV1#Jb{I-ghFNO0 zFU~+QgPs{FY1AbiU&S$QSix>*rqYVma<-~s%ALhFyVhAYepId1 zs!gOB&weC18yhE-v6ltKZMV|>JwTX+X)Y_EI(Ff^3$WTD|Ea-1HlP;6L~&40Q&5{0 z$e$2KhUgH8ucMJxJV#M%cs!d~#hR^nRwk|uuCSf6irJCkSyI<%CR==tftx6d%;?ef zYIcjZrP@APzbtOeUe>m-TW}c-ugh+U*RbL1eIY{?>@8aW9bb1NGRy@MTse@>= za%;5=U}X%K2tKTYe9gjMcBvX%qrC&uZ`d(t)g)X8snf?vBe3H%dG=bl^rv8Z@YN$gd9yveHY0@Wt0$s zh^7jCp(q+6XDoekb;=%y=Wr8%6;z0ANH5dDR_VudDG|&_lYykJaiR+(y{zpR=qL3|2e${8 z2V;?jgHj7}Kl(d8C9xWRjhpf_)KOXl+@c4wrHy zL3#9U(`=N59og2KqVh>nK~g9>fX*PI0`>i;;b6KF|8zg+k2hViCt}4dfMdvb1NJ-Rfa7vL2;lPK{Lq*u`JT>S zoM_bZ_?UY6oV6Ja14X^;LqJPl+w?vf*C!nGK;uU^0GRN|UeFF@;H(Hgp8x^|;ygh? zIZx3DuO(lD01ksanR@Mn#lti=p28RTNYY6yK={RMFiVd~k8!@a&^jicZ&rxD3CCI! 
zVb=fI?;c#f{K4Pp2lnb8iF2mig)|6JEmU86Y%l}m>(VnI*Bj`a6qk8QL&~PFDxI8b z2mcsQBe9$q`Q$LfG2wdvK`M1}7?SwLAV&)nO;kAk`SAz%x9CDVHVbUd$O(*aI@D|s zLxJW7W(QeGpQY<$dSD6U$ja(;Hb3{Zx@)*fIQaW{8<$KJ&fS0caI2Py^clOq9@Irt z7th7F?7W`j{&UmM==Lo~T&^R7A?G=K_e-zfTX|)i`pLitlNE(~tq*}sS1x2}Jlul6 z5+r#4SpQu8h{ntIv#qCVH`uG~+I8l+7ZG&d`Dm!+(rZQDV*1LS^WfH%-!5aTAxry~ z4xl&rot5ct{xQ$w$MtVTUi6tBFSJWq2Rj@?HAX1H$eL*fk{Hq;E`x|hghRkipYNyt zKCO=*KSziiVk|+)qQCGrTYH9X!Z0$k{Nde~0Wl`P{}ca%nv<6fnYw^~9dYxTnTZB&&962jX0DM&wy&8fdxX8xeHSe=UU&Mq zRTaUKnQO|A>E#|PUo+F=Q@dMdt`P*6e92za(TH{5C*2I2S~p?~O@hYiT>1(n^Lqqn zqewq3ctAA%0E)r53*P-a8Ak32mGtUG`L^WVcm`QovX`ecB4E9X60wrA(6NZ7z~*_DV_e z8$I*eZ8m=WtChE{#QzeyHpZ%7GwFHlwo2*tAuloI-j2exx3#x7EL^&D;Re|Kj-XT- zt908^soV2`7s+Hha!d^#J+B)0-`{qIF_x=B811SZlbUe%kvPce^xu7?LY|C z@f1gRPha1jq|=f}Se)}v-7MWH9)YAs*FJ&v3ZT9TSi?e#jarin0tjPNmxZNU_JFJG z+tZi!q)JP|4pQ)?l8$hRaPeoKf!3>MM-bp06RodLa*wD=g3)@pYJ^*YrwSIO!SaZo zDTb!G9d!hb%Y0QdYxqNSCT5o0I!GDD$Z@N!8J3eI@@0AiJmD7brkvF!pJGg_AiJ1I zO^^cKe`w$DsO|1#^_|`6XTfw6E3SJ(agG*G9qj?JiqFSL|6tSD6vUwK?Cwr~gg)Do zp@$D~7~66-=p4`!!UzJDKAymb!!R(}%O?Uel|rMH>OpRGINALtg%gpg`=}M^Q#V5( zMgJY&gF)+;`e38QHI*c%B}m94o&tOfae;og&!J2;6ENW}QeL73jatbI1*9X~y=$Dm%6FwDcnCyMRL}zo`0=y7=}*Uw zo3!qZncAL{HCgY!+}eKr{P8o27ye+;qJP;kOB%RpSesGoHLT6tcYp*6v~Z9NCyb6m zP#qds0jyqXX46qMNhXDn3pyIxw2f_z;L_X9EIB}AhyC`FYI}G3$WnW>#NMy{0aw}nB%1=Z4&*(FaCn5QG(zvdG^pQRU25;{wwG4h z@kuLO0F->{@g2!;NNd!PfqM-;@F0;&wK}0fT9UrH}(8A5I zt33(+&U;CLN|8+71@g z(s!f-kZZZILUG$QXm9iYiE*>2w;gpM>lgM{R9vT3q>qI{ELO2hJHVi`)*jzOk$r)9 zq}$VrE0$GUCm6A3H5J-=Z9i*biw8ng zi<1nM0lo^KqRY@Asucc#DMmWsnCS;5uPR)GL3pL=-IqSd>4&D&NKSGHH?pG;=Xo`w zw~VV9ddkwbp~m>9G0*b?j7-0fOwR?*U#BE#n7A=_fDS>`fwatxQ+`FzhBGQUAyIRZ??eJt46vHBlR>9m!vfb6I)8!v6TmtZ%G6&E|1e zOtx5xy%yOSu+<9Ul5w5N=&~4Oph?I=ZKLX5DXO(*&Po>5KjbY7s@tp$8(fO|`Xy}Y z;NmMypLoG7r#Xz4aHz7n)MYZ7Z1v;DFHLNV{)to;(;TJ=bbMgud96xRMME#0d$z-S z-r1ROBbW^&YdQWA>U|Y>{whex#~K!ZgEEk=LYG8Wqo28NFv)!t!~}quaAt}I^y-m| z8~E{9H2VnyVxb_wCZ7v%y(B@VrM6lzk~|ywCi3HeiSV`TF>j+Ijd|p*kyn;=mqtf8&DK^|*f+y$38+9!sis9N=S)nINm9=CJ<;Y z!t&C>MIeyou4XLM*ywT_JuOXR>VkpFwuT9j5>667A=CU*{TBrMTgb4HuW&!%Yt`;#md7-`R`ouOi$rEd!ErI zo#>qggAcx?C7`rQ2;)~PYCw%CkS(@EJHZ|!!lhi@Dp$*n^mgrrImsS~(ioGak>3)w zvop0lq@IISuA0Ou*#1JkG{U>xSQV1e}c)!d$L1plFX5XDXX5N7Ns{kT{y5|6MfhBD+esT)e7&CgSW8FxsXTAY=}?0A!j_V9 zJ;IJ~d%av<@=fNPJ9)T3qE78kaz64E>dJaYab5uaU`n~Zdp2h{8DV%SKE5G^$LfuOTRRjB;TnT(Jk$r{Pfe4CO!SM_7d)I zquW~FVCpSycJ~c*B*V8?Qqo=GwU8CkmmLFugfHQ7;A{yCy1OL-+X=twLYg9|H=~8H znnN@|tCs^ZLlCBl5wHvYF}2vo>a6%mUWpTds_mt*@wMN4-r`%NTA%+$(`m6{MNpi@ zMx)8f>U4hd!row@gM&PVo&Hx+lV@$j9yWTjTue zG9n0DP<*HUmJ7ZZWwI2x+{t3QEfr6?T}2iXl=6e0b~)J>X3`!fXd9+2wc1%cj&F@Z zgYR|r5Xd5jy9;YW&=4{-0rJ*L5CgDPj9^3%bp-`HkyBs`j1iTUGD4?WilZ6RO8mIE z+~Joc?GID6K96dyuv(dWREK9Os~%?$$FxswxQsoOi8M?RnL%B~Lyk&(-09D0M?^Jy zWjP)n(b)TF<-|CG%!Vz?8Fu&6iU<>oG#kGcrcrrBlfZMVl0wOJvsq%RL9To%iCW@)#& zZAJWhgzYAq)#NTNb~3GBcD%ZZOc43!YWSyA7TD6xkk)n^FaRAz73b}%9d&YisBic(?mv=Iq^r%Ug zzHq-rRrhfOOF+yR=AN!a9*Rd#sM9ONt5h~w)yMP7Dl9lfpi$H0%GPW^lS4~~?vI8Z z%^ToK#NOe0ExmUsb`lLO$W*}yXNOxPe@zD*90uTDULnH6C?InP3J=jYEO2d)&e|mP z1DSd0QOZeuLWo*NqZzopA+LXy9)fJC00NSX=_4Mi1Z)YyZVC>C!g}cY(Amaj%QN+bev|Xxd2OPD zk!dfkY6k!(sDBvsFC2r^?}hb81(WG5Lt9|riT`2?P;B%jaf5UX<~OJ;uAL$=Ien+V zC!V8u0v?CUa)4*Q+Q_u zkx{q;NjLcvyMuU*{+uDsCQ4U{JLowYby-tn@hatL zy}X>9y08#}oytdn^qfFesF)Tt(2!XGw#r%?7&zzFFh2U;#U9XBO8W--#gOpfbJ`Ey z|M8FCKlWQrOJwE;@Sm02l9OBr7N}go4V8ur)}M@m2uWjggb)DC4s`I4d7_8O&E(j; z?3$9~R$QDxNM^rNh9Y;6P7w+bo2q}NEd6f&_raor-v`UCaTM3TT8HK2-$|n{N@U>_ zL-`P7EXoEU5JRMa)?tNUEe8XFis+w8g9k(QQ)%?&Oac}S`2V$b?%`DwXBgja&&fR@ zH_XidF$p1wA)J|Wk1;?lCl?fgc)=TB3>Y8;BoMqHwJqhL)Tgydv9(?(TBX)fq%=~C 
zmLj!iX-kn7QA(9snzk0LRf<%SzO&~IhLor6A3f*U^UcoAygRe!H#@UCv$JUP&vPxs zeDj$1%#<2T1!e|!7xI+~_VXLl5|jHqvOhU7ZDUGee;HnkcPP=_k_FFxPjXg*9KyI+ zIh0@+s)1JDSuKMeaDZ3|<_*J8{TUFDLl|mXmY8B>Wj_?4mC#=XjsCKPEO=p0c&t&Z zd1%kHxR#o9S*C?du*}tEHfAC7WetnvS}`<%j=o7YVna)6pw(xzkUi7f#$|^y4WQ{7 zu@@lu=j6xr*11VEIY+`B{tgd(c3zO8%nGk0U^%ec6h)G_`ki|XQXr!?NsQkxzV6Bn1ea9L+@ z(Zr7CU_oXaW>VOdfzENm+FlFQ7Se0ROrNdw(QLvb6{f}HRQ{$Je>(c&rws#{dFI^r zZ4^(`J*G0~Pu_+p5AAh>RRpkcbaS2a?Fe&JqxDTp`dIW9;DL%0wxX5;`KxyA4F{(~_`93>NF@bj4LF!NC&D6Zm+Di$Q-tb2*Q z&csGmXyqA%Z9s(AxNO3@Ij=WGt=UG6J7F;r*uqdQa z?7j!nV{8eQE-cwY7L(3AEXF3&V*9{DpSYdyCjRhv#&2johwf{r+k`QB81%!aRVN<& z@b*N^xiw_lU>H~@4MWzgHxSOGVfnD|iC7=hf0%CPm_@@4^t-nj#GHMug&S|FJtr?i z^JVrobltd(-?Ll>)6>jwgX=dUy+^n_ifzM>3)an3iOzpG9Tu;+96TP<0Jm_PIqof3 zMn=~M!#Ky{CTN_2f7Y-i#|gW~32RCWKA4-J9sS&>kYpTOx#xVNLCo)A$LUme^fVNH z@^S7VU^UJ0YR8?Oy$^IYuG*bm|g;@aX~i60%`7XLy*AYpYvZ^F^U(!|RW z*C!rJ@+7TGdL=nNd1gv^%B+;Fcr$y)i0!GRsZXRHPs>QVGVR{9r_#&Qd(wL|5;H;> zD>HUw=4CF++&{7$<8G@j*nGjhEO%BQYfjeItp4mPvY*JYb1HKd!{HJ9*)(3%BR%{Pp?AM&*yHAJsW({ivOzj*qS!-7|XEn6@zo z3L*tBT%<4RxoAh>q{0n_JBmgW6&8hx?kL(_^k%VL>?xjAyrKBmSl`$=V|SK}ELl}@ zd|d0eo#RfG`bw9SK3%r4Y+rdvc}w}~ixV%tqawbdqvE-WcgE+BUpxMT%F@btm76MG zn=oQRWWuTm+a{dy)Oc2V4yX(@M{QAkx>(QB59*`dLT`Pz3Lsj9iB=HSHAiCq()ns|Cr)1*c605Cx}3V&x}Lg?b+6Q?)z7Kl zQh&1Hx`y6JY-Cwvd*ozeps}a1xAA0CR+Da;+O(i)P1C;SjOI}Dtmf6tPqo-Bl`U78 zv$kYgPntPp@G)n1an9tEoL*Vumu9`>_@I(;+5+fBa-*?fEx=mTEjZ7wq}#@Gd5_cW z!mP{N=yqEntDo)|>oy6{9cu+-3*GTnmb^`O0^FzRPO^&aG`f@F_R*aQ_e{F+_9%NW z4KG_B`@X3EVV9L>?_RNDMddA>w=e0KfAiw5?#i1NFT%Zz#nuv(&!yIU>lVxmzYKQ` zzJ*0w9<&L4aJ6A;0j|_~i>+y(q-=;2Xxhx2v%CYY^{} z^J@LO()eLo|7!{ghQ+(u$wxO*xY#)cL(|miH2_ck2yN{mu4O9=hBW*pM_()-_YdH#Ru{JtwJ^R2}3?!>>m1pohh zrn(!xCjE0Q&EH1QK?zA%sxVh&H99cObJUY$veZhQ)MLu-h%`!*G)s$2k;~+A z)Kk->Ri?`oGDEJEtI*wijm(s5f$W78FH{+qBxiU{~kq((J3uK{m z$|C8K#j-?hm8H@x%VfFqpnvu@xn1s%J7uNZC9C99a<_b1J|mx%)$%!6gPU|~<@2&m zz99GDp`|a%m*iggvfL;4%X;~WY>)@!tMWB@P`)k?$;0x9JSrRI8?s3rlgH(o@`OAo zn{f*gZ#t2u6K??hx|aElOM`Xd0t+SAIUEHvFw%?Wsm$s zUXq{6UU?a>Nc@@Xlb_2k9M1Ctr<#+O?yd}rv z_wu&=_t$!Yngd@N_AUj}T; z#*Ce|%XZr_sQcsWcsl{pCnnj+c8ZNIMmx<;w=-g$Q>BU;9k;w|zQ;4!W32Xg2Cd?{ zvmO3kuKQ^Hv;o>6ZHP8ZJ2`4~Bx?N;cf<0fi=!*G^^WzbTF3e$b&d^qqB{>nqLG81 zs94bBh%|Vj+hLu=!8(b9brJ>ZBns9^6s(gdSVyP9qnu2_I{Sg8j-rloG6{d`De5We zDe5WeY3ga}Y3ga}Y3ga}Y3ga}Y3ga}d8y~6o|k%F>UpW>rJk31Ug~+N=cS&HdOqs; zsOO`ek9t1p`Kafko{xGy>iMbXr=FjBxZMYc8a#gL`Kjlpo}YSt>iMY`pk9DF0qO*( z6QE9jIsxhgs1u-0kUBx8D@eT{^@7w3QZGooAoYUO3sNscy%6<6)C*BBM7L`dk$Xk%6}eZQXgo#!75P`>Uy*-B{uTLGUy*-B{uTLGUy*-B{uTLGqo1h^Sl?5fQHy z3@Rvsm7*022$ABYeX&1l3tg19UZPd{Y7=d(ZPnK*Z!eHN`F)=`XUP&m>-+!xexJ{O zH?uQy&YWkSnR(`!XP)Po6M+eWU=cP6lF%}8|&%ddqyBm-N z{Tbxb7T>Ub5&Qa-3;A|IxTbl@!uc_wt`W~KsKouq5?nAIk=G#~L%w9miksK%HQQQ{ zzfTavPj6Ut{ruBkb_@}Og}BCEUNL`N3kwKu2*ToWl=rNhzhYtg&RxKL@zsJLZD?6_ z)6MT)KY6VnEc-dCU%z(Yf<p=6vpVK=EbUm|aev2Sol<97XHI8v zXGLdiXI~kpyFL~$jshU}17x8WWT8XXk=5bpsP3rg7y`(n zIwk?~f{vDsO&zVBtW(#S)#>Rh>8$RIb`I$r)_Ha3q|SMrEuEV>TRR^k$lafGpY2}M zVffuAzdQcBB_By=ogbJ#NcZG;vOPAB$)oq^in@!GqD0Z(i~d^lRneb|eqZ!a(Je(c z7p*8-T(qcYUeVm5=AxNJ(~Bk+jV>Bi)L0ZPiWI)7_7<@IzyG1}62u2Jz_o}yTA=aj zhtMB^C}pn}Kx-Z(Js2;+fVfHxf(`LpH3)XZht(iB1fdxBC(c1#}I^JNDoFl zLJb1)9itFNdk&aVx@ONUs!x zPPD6&a9)ELICrKYjb}Qu5OR>d9kB-ixC{3pEezwwFAxLw z&Rt0VQV>2yL_q+xojbvUAiRb6BoBh{HsUip2*Nvvf5n3!v?KmI4}$Qn!2a9DgCM+z z*ujG!{06a$2SIoraVZai@Bv~!4+1!nz(8B*M*d+UA_}P=+@vm6KQemx|IZ&{%9ngF z6Ta1luR8(*pAzxKdcc-Q9yHt_1fFL?)u3YrS@cW)NIdu6+TkMQK-BSSzbUXicV+ z7LJQfeo#IlfbN;MP!5Nh#M-dlp!XH~1I+J>hHIkui9{peklW?<)dWOeu~{^D4PL#| zD|wXm^y>OyVQ0aZap5CH^Ox`c<=T>=rVnB_>dwaQEggHy@vmD3>0bzs8&jBFKYXyA 
z-4;{Y^=v0QH|FM{{VloGGiwhoyXCuqL+fHywXyxPx4yD?S+u!2$5A=EDHezTzc_1^ z$B8G1@Tg7lxULP-7V(4vy6^s)Rm!i)R}n9>dqa`hnlfLpA;5gadZ)u}W=@CenE2(o zg9q0IDl1=D`S|^^4>Hy=gPFMtS+t4OT5HM-I`k92rd^Ug8!~3%Oq=!oi6f_)jfpIynerv~O}wgE zdN%R*EO+keNVFoyJvl1fXv~m)D%p*RiPr3#)hjD9neu_m!lbUMtEAt2Y*Aj8D_t8ZI( zOLJt{`Yi{Vn)Yv5Kdf%{+O_MY7e-ty516`UNd5XvcO08O{n#Cw*4GbNGj)JG8eJ@Q zzbuTBcc6cbBu_DWIP5GH!@THQWpxD<2Gj#x+Ol-P&stk*TFHxBwc zkvJeWBhj@X7L&I0#BsWw7=GzRdEABL@;Hz!%_2nV2boGO$>*rR`I`keR*_V}tZ1jV zxD1pW3422>U9bGVy??I2skAr?3Y@IfSs*s2<`M@|bC=$eb9TLQ$KZ#x_MPtP==*wV`EOH3 z&P~?T11}||T=Rc&Tiu<}Jh`;r`|NR|C7MA*OAN~iMnsRfH?*pM8{gs&flJGQr>@Q4eq1ZnwMC4)3ed| zy64ZIe|{ar5b(>Gz(DuUU*zvXsm~f_TF@bu+v0Jhy(ggfg-Il*vU9i&7^09XY-!SfL3is01oMw=+<0u`OONSvkBOPN(&Wm24|CRYu-M^_clmsRI@E6Vi2O5HsTfyq*CrnqKf^Q?^^DGDyGgj_z>R@RGLqE=-UPD8ENsq-cmp9W_2*&+8QgS3U&jTUppg-(K4_w-?!PX4|`0`BFKde7Se8I9ECN%{OeuH_8Iw7?TfQyu)l%()Epc{}6<1$YOh- z|8f9Vl1~KYle{b};mf=k$cS%!U7q*@JNlM$pW{t-H1TOD?_eIam4tLw3GwF~1Y!^} z-^pU_O~Rp$VzfUCGm>aX_+WolK8mx-xbhLZ_2^Lo!uLz(6ceySkD<-zYsi{Mfr(ov z#FbE?s7~UVCf3vF3;+(ZkIsFxckbN1S|p0f;jh1D)4o>XJI|lr8JCY^h ztaba7r!;0sJXLH4rvy)(Om}Y87%d{sy9Lg>vji`oM*&dp^kGAR3ZmE#f(J%w!x(w& zkquVy#3L>DK7W2E@!(TWZciMzBrACynRNbns`l3H*oC+BGYd$1gSCkjicJg;Nn6Tq+tPaP&9fbY?p?QG^)g^U)lME^EH5{Xn5>uv zRcCthbQ3u};0JAd480i?u0oGmp+&$LC09d8?@i28h<&IgX@UAk7AC2l%fh|#a@+M! zfArZ$PhSrfnPJ}gd#3;WR-WwYFs1EHGw~m>xhIYNTjk9tkH>CS+BsXRyyLCatKYhV z=iXOp=plB7epAvwo90GbZk9fS%miMU!@N3cCWFcb`Wh%}qHdb5;Ezvj9kn(22c<|0 z=1V-Dyns6Zqr#F}I4tlo4og=W#e!(?V?L;mSnG&Y%ZANJ!lZJ0`6o$%5A z6$~H5XaXsLdWjWxZQz|tiVbWb#S^g@zi}?kx0O^PaR5sksL{h8B#Osc6^pS-6y!1t z-KG_c0I5_?WXjWVB77`C0E0X9N$$~z7hXOe1-sAMkd&T~4x>?4OukyeKg!$Ss|6H5 zgB~bOk%}NSOT8$!b!AJRrG^W~W3lvW_(!D??CLo`Fkp;@bdj&gQl!RTR&3Ba+^!HQ zcM>BYMw~rfP*6Cvkbcl06VyMyHCmL{3Z@kl7Saz|0P59!h_)Coo>-$bXk4NXvs9SR z6HF}jXQj^+Q;59=KB5$x&J7=^@jchhecIDX(a}&ek zaq&bvo@jmCXf_+^N9}Lu{ej0(tmnmo;H@o#*0YK+AJaokW}(q74zR({(gF=9v%Bqb zTXDIqP_I|+xK6n-JKxmLVqq&Pno8`~vU{gw^{-X79}C<(l=ZU*%$d@sUAF2xQ?9`< zbf_y*`R9)Y%p5AFv(pbMKjVFXev^KNx?$@i#U6B+n8{|*!U|=?=#N^iqzg!Xot4&{ znled^`m-4O&AK1Ey~P=(w7d~D{ntD@Q886Ci0Q79B3AjGaW@>;{k>V6ZlCj%e6;Ps z=ylQZG=pRcU$tiBwC&?(8N%gKL%zEp(_#oIci%RC%KWbF^QX0NGgLlcYIBh)+oT4{yo9ax;B(`_Zh3EE_-KeH0}s1>WWM1zi|8vM8yb;}!f zhO(RiZ!uU31~)ERJQg?5Gr9D$Xe*Xm5Hp*qC}v^p;w z*N{S;G6K<5kG?@5T>?=z=@LN2k=}Xf-`uBNVd4PSA2h4_n67NfNuN0j;swsG4xaJg z7L*Pbj#Ew^=PZz3RJW3j!b0VUbGT$csKSDU|GP+LcF9pJrBsJ=9lH5vrwS)Ti|K!5=NyGy*{4rGE8dDr?fg=uqmT+G`HiEHcE>4gPhlm$92*;Zd%Ul{ zpmt$35ulqOKA6%j;t{EBA`5A6KB6PRvexkL+I708Ne}>H@zhp9`it*R{N>86N@>x- z3&+I=F1F%dHA>wNv_XcqkjF)D`$D=XZK*6u*orDEi^MOB_}+k3N>3)%@GB4CHv#nt z?eKeKAnG4CEE<Mp%Hx^%i-A(-muYYU(^2Z)~Z|7t3D;wYa+m6+L8#*+-c=@Wm zW509ThTq(o7(us|Eq@Gk^yo;icf3SH!mP#63-wZru;#W47kX(!x~`LE(6$}Vi^47N zi~60;0vj61428fB)@M?iHc3)I^p`;w$?chLv7dAF#F^sX6=eK$oe@it)27o_nti2wO;QUQ$BiYO?c(b z$y08CxwPs&TMntO#Z)Evb|%dVLKxVcG&vO(48(u&^5bWy0(G0UOiUy_ndu-2YWw~_EjnngQRBr9$MJm7l7k%1~8!AYCYpA$= zT8QnrQCZI0jvv?|#|imD02riJ?se-8q?N#qnQE_vj^0^p))|_lA|{W!SiMfXd;0cd z^)uNLWtSoQ>R~g6)n^ngUOcz3fSs&O;xNh6oW$WSsNtI47tQYQuoc6~YGD7wM5eJI zeD(vM0&uBb_>k(Q2OsnXw=bliQaNbYG3DtbF3J~TOsU_U;tY z<)?53WlkyY6HG4WZb4hH%kt7RPE|NKt$?YRQdX67>@#HyaYvH4pnf0A{>X7t(qyZ__dbhJ@DNS8g3wYhwr*rrmI;~1cYLv&N zili4|Knm6RtQ`GL?L(L0OWR9m5@8WgvY|ynH;~r?jS)Uvj;65>V{deEnD}#ewk9Iy zCf9fBXLQlI0$x2AkJ*d7qcy02{DKo|6UG&+pQ&SiIoz6vG^GdTW$-wL91iKx7v;xf`du&bMkZ0 zDWdmMHLyAu+rpSOw8C-)tR1@fFQA+MV((ry8G4I&Tz;T0q~q_+N!MMs!}?LK-r=mm?8D1TwQF%q;k^xz(Wtad5na1(q_0unK2 zkStczCfz_zWDaN)WH<4v-qlWy>udvx^L@eL!MvsSw8|EPUet-{vRSrEc2}BPXYm(g zv&%;%@khy65o!*F$CYR6Tka6`CZj9kVuwa~skwI_5y2mv$! 
z-JPnCPwkP(WTGLx++|&IKk2l%j*I$4T^mSmmP?up==#je0EHj9kky8pq-br}Stz=7 z&PWt_T*W<`T`RY}k@M25_=EQqzV@1>--zX-JXZOU(U)SQmzEE*jjyE6N& zx3gD`g#u^M0q@C^d5_&5A2e%fG&3G|OuB1C{8!cAjgMLGKJ!NQ@~h*cS7iSRZSJu_ z*h#iZZFAC8V@Xlu@NclqH;?>(4VU1(nZoUN}no& zm0_%$RVIri4)D5v!PgFGvP-RS2?GsUQT^PuXEyuvBk%v?9m|r}*nI83TRc0zJo0Si?GC#&vwQ=pj z{(yY4dP&pJ#?dy)Z7*cxo|-))T{LB}?+ui*oxgTu%L8SfBjWJcz}k0RyiJ}3 zi9fP{qoBZ{yp7*GW3&qKHMb2i?*RCJMWOK*m~Rk+iJu%R;mBt|lIY3;x!b|l66o`x z`45*y3ngC#D~3c4n^lEKl(9+_i!&Pio`U~!+3e0Qy#@Y8qfZo9k%k;xMd|;#&g`*? ziGM18l!|S({bY9KbkrhkVMa&VVSlx?HPe-CYPAK*o=JZH`+*V;C0TDDYsM1yCu58e|qLKI0(-%dwMusZ?{BW7uS~!p1WyU$dRrq$O+%%@ti!fDs$>k;3swe zOt@YCLJng`F_`?_nZc|t4(Q-K(WDO*>fA!8NseMOmUNMb>J5dmojfPNFy$|D_4y+w z-n8bC)<@RdG;w6UKDYOU#E4C6r_8FnI)g#>?)Vygkk?ECJTFS%MHY_o-(WN5>=8Ty|-h$Id&pc$D*Epw+{chQY zVN0{;l?XE0BA_j8*p~%_Iwt+j4c|pi=htTtn&Xg^!Fba}B5}uC`aP`ThOF?hIrm0;S6zLX+Np z0?ny%7Y?+LA@d>U!o}(U7{rfO#X6ylmv_je&z+2lizmuw_4`LL_<14{$byGpU)@TQACXCAB4nM?DW ziH(jrM`EKhPs)lb``Ih(6=gq`!ciXC3xQYiu;mt4wpG~`%eBw>XpTKMrtGq2yDV&Z z^M+>e7s`K_gN_PErsFZ;;`~2 zxwpvUkUoIjF*>TDLTs)8#{sSoT)4jm+2IDD18GGdc8~qP4wI&ldEw*jB7dYNy}zcB zsYX6>3}==4Z2$O$Prmx(!twrWJ+jv6{@T)piXv+Uq$4mEGyt`DGy|H?+ zGWgPESV)nOk97V1H|+LPtUv4j&!6MB@(p(9Z{Us93WF!S2mZkFuxREfe*o?xJe82Hr(qPEN8kx^iW9sEp$L7-p|E;n{Bi2 zvy#pyDGQF%e0CsNhBZGa_()+(I@b@B`Xs+6I7`zaOxE6$NHT* zrMyS70w-*kkEuph1({|uFApmalndC(z?%Yh)sn30QSn=)9wlT9|C z7p2S$i#{I84rOMZ7Y$Aq8qVMy;FR~sdx&Q;gCBc0e918)>Lw2fe-y3~?3Do>6aMtW zAO2}V$AI0tk^b}X{UV7&Bo#vg zBX?XFBhgMM!+9hbyiUpI_gM!s_^O2AlM~9THqYDch&A4pbv{t~WkI7~c{#t)599Uu z_wI}BjD=tjmfOnnPyIZ%RB0I-t7pwc{bQAr*BEwIPFB9?yj{6J#@4pK3+4xbmE)uG zG_n(ezP#vpcsoK9*ucoN;kIkT&Ld86et47m;G~ zADaJ({++k8wK3)X_IEjdOamWr%G1$5johcE6eLl^xF-lmP-O#TQRiMXI9BBL+MBqb z$ZZAvL{;fK7~&{RjvLrAbB5Kl!kjUk1*R`wF>U!~L!L!BWOz2;JTS&e@6zX4-pI1q zvXm&xkkciDEQ>nhBQvN0($Y`$rWUiqW?nz8b%OGo%fByE%(RvouU67$v8m4TLZ_pE zF;UVF-)LZRHKriVX9L%&d%Swi|U!2ZYn*45pNP zL?u}1GUcH7DWu^^pURnjYvSw7@0B~*)CsNQ*!rw2XXcHjXI{>*WTXRS5vL|99LjUE z*x$ZT5toGdv^MF?kTd!IpS*khFnN*g-0ClbWK2@INQzm5SAyFsgwR2B+9pE8;d1M8 zh{4F?%ALw{sB*of)ZF6A;+Tk;nfqQ*(m$X2k}F58JQO0#uwVLs&Cpu6e7f@XG!x5Q z=_*oo==9IZXyW$4b>R zK%~1PJAV=663FfjXf0})6$gWek%4{&k+fC@pI)4R36hHqo9d|8mznqmV{H7?;%dn( zv#e+1TPJ{}9(I(6LXttB?Rt6Y7wqryq@0Gv%w!qVgd0{)1GKZ7 z_4$_9T{fGG#WM_9X;P-`;Tdcyts_`V!2=G#PZjG53ne{FiM!b$u0V$)UbF9_2Iup= zbN7CD3uo@^VP&O!Xs`0Qrq;6WyY<7pa~0d^*H{_rcX5q61lU=ebHS6->EQ0G1RP=z zB%@k!Iz5$y0^rK$*tG_51ndwpx9;N_GZl2=IpyqYr%$Hf+!tJle5AradOe3rN;i)5 z3sA3J0V)?#mt-~7zm@ZnWItyK_X)eGr!VOZc!5AX zg{27FCGFSYGQfHS@vBgby7Y+QtwLlj(oO|`bV5)M+YIS{A`qgHjz(x3P{@jKyaIQk z*ou`!NkJBcdrQPml!uajy#dxoH!fl8<_a}k-d7J>`sX&KSsE=)7=Yke64a&T>5G}k zm7SJ7&DB(2kQR{o4bU^)qP2y^KFJ)&G>^2VH+lkDp)8r{D`YV(C)aJaXXvx^<#~Ej zx!G)&k^nocByC=)a(kt^zOj537v}RzN(0lyn zm~46@Lq8e(mJGL{_(r#PZGQU5oD92cDom>?lx<@iqp(3Vn#9!wB~3+;4-HuvOw7pe zxy33mGfi@p*$Q$B@(Z){j2VpfQtV1cJKg<_=6;TxbemmD&v5&l9z%tcDe2@ApUWgI zu?79IsFzJ?rV@kEL@G|wo(S_WXAWyNSHHT0Cn>zQRC1Z5LK}eI<#0_C*SWMJTQQyC z!A1g#c7c@cy)S`i<-@6R41~5Gq2`hd@a6vKnygO}8+fA|y9EOoG_pf5#O%XL4JnBn zv9VgF$X}#eaexcMI)~%4R_vPmvX|DntAJ1@LNTAcW{f$II_`Jn^y0m!pXaL+nns4xzAU+VF$c{P{P+RK+NU6f1Q zYTj>1Zt8K8Rx46lQ$qe;yfiyTuJ3&~$tT`*c|0z+$HN>f-Q%W=*%GyeuMSrf{Vh;L zx0K?5hwjJ+F7u>UJ*FS<1U%kK?=)sMySzvnx4Q~T!r>B6P-iYupXF6RtPzDtLPY+V z+ziQ$I9CgF&z+ETryz}H; zf!Q~V8hPq=_Nu9AWOM$gc~cG@nYds?-i)i7T(ehQ%ju-P`)hfv{1f0tyB*jFpuh$5 zp`)yHz!ryp8E|pKXD}R!!od;O{028Pt!Rb;ci4a0m$tLJ|323iC@Szphi)Bu-P|F{ zABGNX=P8yqbm&%-VQIT^8x<*t4rM#7{DFD4Ky86#p47VSCsL~NkC z4~9!UBu?cAGa4IbG{&SKIYWWM!a&H`HHx+i&%p%~*BfU5JamLMh&7!;6|{6$p+~H4 
zavao?;+=cyg~3X#etsC1aSgoe_63*(XKsubddY1ipF;7(km5m;qUFbS#~zWwf7D)OqeL!D+ezfdi7Z40<)zxj4r6mcIpk{o62e1-9tt} zB8dr$q(@<+x|&9l-05kR0ZlG1f2BXEQl=*PNoBQy&IMT7t#iJg+?&i z(t=RMM1Mc`+ado9cXm|oG+Is8^lDSdhtFm^jOkL7GFTnT=$7+u)z>^NLg8)mK8%_{Gm zf;s@Z#nbp>mDk6vhh+wK8&%IimTZ`C&f!uE)Kc8(`I7pwpu^+dugUt7Rn)3=K$(lf zdF0|;>r1KcVl}7-U>Bkeu2+FIo;I%Ju?dw0s-{yRGVdEYf1}6F-i8`s-BvpWt+D#t zR0VJ0#g5|Ur8t_Tb(RON;aCI67!~gYk6LgM-bF|fhpfSq$HWNMLO{LP`6?`cR7^B} zd<^)WQx6RpjY0}kz=FHGHyJKs3EyK<5~!z^xdECFEi6?WTl)RCumKkisA@nxNsNyW zI1MmWL5>YXHoakka%evSoe9|q1co&{$z^EIp-ZvMBVR^_mwjJ;@ig~P5o=Yq6LL?1 zCQiHheFmo#EYm&rs0z{__S6IVgsz|OF0s+!HA=l|(pgJMANTYZU+yD-f4Qm$UV}1< zjfa0s<#&Sy-3p1+Yu9l#wWLEQgB?F05TAd9L z3Q0E6h@%nayB*5GciH?M?A)4@6%t1Cw3@Ly~}3oNPOqEN2!mgKX09o z^rl*X_FZaMCdVP5k^Uz1xEvj(Wj!J7I_e4Pm@+m`xn2+|vVA`Fx$sPZ5@$yKNm@kF1+Q4>cU8pW*FUVaEn&urJfoWAG`zW{W}K_ z-jV$4RjKmL;)CqrcvoTa{-z%sBvMgnn)JoAYWLMn>PW1uszin{GxgL8Q3XN)_ZzIl z2J@0u@{S}!042UvJ>adVM-|<~*~-eEdbA^91dG(Zm)5f~{*+94mJkr zP3Y@1&u=m5@`+jCgfS)cOa%@xg94;2yvm)i#9400DMNMCN2D8A1eiyVBKbx=*9VFq z17HP%hfbI|k=W>fc*`&gcU~^*NL{0?m$7`>k9pgW8TS>0+c}^+N&oFY&L^^K6 z6R}W;|H)H|?ABYdMieQ#3TnOCdYy6;O3RNxUV1~hirUTo*BgW+jhp&QeULn>HZEyL zp_Ry)ob6#s7fK{ws7JqmmzOqd5VeZ~k~|J}5*Q0|6jRPvoG~Yh39dk0pTo}OjKzzp z=*lu_ohyflb#lW*L}&$>;Yv>^0GEAs$7+{CzW!GhaczY+)f;$ zB>i%#oI?YzD|PDd?xzY^e^AWtjfzjhHo)B~{7VxDu)MYN6$~#Lpac6j7D?VYEzl!V z`lrmV%+$)0`7OR+0md&WSl~giAnv>S>AM%i7bx%HHu^0~$dbP+KSkCqyFriLW1$p= z%8r~t&{<{JVPnrmP9i_t$5>I*!;2Qb_1JAiMNenx?XTKvverJdVdKIzR=xQ<<^l5d zeHs1lf2e)Y;)ff(Y@fBte4kmiu35ZcII9_)YY-LSb zc>*1?!t5+`(4i!}f@6i~Dx1wx~S9Nu`hxbm1Cn_4qy3FNC?n9%a_bu>#r&YX&zx{%*L`kWNWPLi`2`d}6 ziJYg_dSOALOWv33L#8Ia+=B-ETvGcZkFRRP5H8BK z$=)FEN$LbO?z0!D5BNIMyJqwNRjIZ=)~ileQWm(Z&P)~_01CgXze!IDXw;RxYhvei z;sg4;w14UJ37x_1qh%5ppdH?WL|L$T>WOprQ70_#vCS2c`m)XJ+~%_SNX6#fRZ}Br z&6~D)#*EF=XpUTpLlMq*z&EBZ98zhG?Dl+h{GQ>}g11{k04f}c%@ngcGopd#q;X!9C z=q+q19yF>PNIn#(8&i)IL8S;*AH6}zixiGH)70V8;Nl(-MZ!j48?QFs0}R3Q>`Gcno>A@aRC*P*9qwX?+$2H zzCK8QkWG2~HKZCgXDkQK#w$Oh8@mU<5sP50$3R8p-85g}!p8du_BtRBbuBjsxSXn4 zz~zRvmXz^UgI7Eeh>Tg99%{I4R_-HnZhl%cr;k}$UnMUcQ&)+q2EgjLbWC=UXHnzq zyY#beeEMcNOA?okscm*OoVdj+B*} zHlUGVD@=kA=?}^C2(Ci3JklEhR6CaR83ZQU1z;&u4OL)hD1(A{Ar3W~@5`*HQ{@io z+Y!k-wqQ-ztp2fffAUUXR6L7+JC-6O9jUlT#Eib#fUdyQOpcGB$RqCK4?!3!0L zvt0b^>PX4pYVSPX6%efxpoES5fy6IS?q7V+Y{uJ8ay)k6^d?V(z8J4ZfSnCTQ2bt) ze`;XQlI~%77K^!`xkUL>`4z$t?|~@xW1{msi_%ef{F&bFrv0U3OF6A!3n}X z7$wTIDjig)3HXQzD$VC`nTJc8J#tS2$Q+Xm`zE}VNE14xEqvy5ZJ@eiYo@TuDQmFE zRq}0{=n5@ONV7dcvxXS!Dn<7&P%Z3k*5`$ zUt!j=3&rpmfcJo0W_9G{+FVl-=l?ozpe;AgVO=xWa_dx^-sYI&!0*&sErXShZU~y{ zM%HD};WkIPAw54(f!FR-z$NZEHfsDvhsU1lw3piN7_a8}qqHqs#$vf*LgKabtA z0B)b$g~i!x>^1d-8#|$lkT=p?LOU4V&h)2vt!~6 ztFFjpOt(l1`o`_H(X{!td&#HqS)X1~Q_0^&EOhP;}*a(7OaYz&N_ z;R&omD8Wn;RVn4 ze6S;}Xwi!OoCk>T)4H4MAEPdKbKrHp*!R^$85}txZk=@eLgq8KZB87v^tY_CSj1-U zgn7?wQxcMK@-9Nb>VIds!$aXej}+OU;W9 z(vu)>EoR36awH!8KnqVJPxJ9=HKu!bmY#<;2G(Z|r~4atAtd3Gz6)=MrZU|xtKs6k zWEqMJ5SD3Wsl4`#kc%|Ihg8jD88G%BP0!FZR;9W9xL!5!)n75hBJoqY1L`B zrtM1?(#z6Erf*39hq2B$$M~@Eu<@&mK*qX^XEQoXxu!Lyw=)Bo_n1TG?^@C<0m~xG zz{3ATeWSt?ONM?w!^lM>_+% zbmTfFIqq|O*Kyntcl@X0AI^MdlXIQ(Jy)6QLDxBViF=Xz3HOO?A={B%o;@l1iR_oN z&t`v}W6T+v)0%T4SI!-mdnC`87t8xe-skz*`NQ*97c>_fD|o$7EL>N3swlr`LeUYA z%TwdI!SjsgjOTCO67Ll6J>H*q|5jXGJg4~a;xoQ9-w@w2-=n@0zRyeYOClxnN_LjC zm!_2tDqU2%r}Q(ND%nzY!k_OS?qBCWQ7)7ZEWe@rNcqqv_{SprSmSGU=(9=c zWimXY@LpbJe3qJtrOO8Mq-(Ua9cl80rZRECB_?q=EmVsSuU)$~fd9kP@0DAH|KKs7mtT(l z@W8L-27Em!5N_hRg~Cn3LR?*g-xx}cLd$1iUS2JXMy(Tt3BpvAyBe@=5EdaU1^mT$ zW(vwL##<$B;I#ztWHra7L70x(XX3erK4D!BX+SSn-xdQ;ujgj)cH9IESMfeb#c2|6 zg^FPhrb|%rX5o5XehpfwJ`sSgUp25_ftD=?Oe(Vo?W49YK#vE6S{~}q?;-H7zVQ9` 
zt?YZG`o6kWpl<;EeFH|h1>?U|!}=y%CHzKbHjzzYli3tDl}%&Q*$g(5HM3c4HoJyh%dTT{*jzRb=DY>$db~z%AzQ>2 zvn6aPTgH~-9KZ^;lC5Gb>_)bl-NbHYx3D#AEnCOdvs>A1Yy-QUZDe<_P3%s#ncc;< zu)Enk>|S;syPrM4zQZ15TiG`D5Nt-<*~9D+_9)wdfA;Yhdz|gUy0e?@VNbH}vZvTy z_C2eZR~ldb$-Z>vlpOSdWpTve#Cyv{)3%> zmHQ|7M+>jApF#@%8T&aq$xg9fusA!-UT1HxGwhe_SM1kV;of3zvv*iKdzZb(exv7X zDX2yv!!0Y9R##tDO>wBYIvEGGJim|YVJ%;y#kE=-(c-8U*J*LR7GI^tp^<7_J5nBT z%j#7;6RB1!iB_wHqt(372n`9u{61oi1Y(W^VqQ67UO8f3IbvQpVh(Rab&xj(u?8oo z!3k<`g1j-fufYpy@PZn=paw6f!3$~dLK?h~1}~(+3u*8|8a$kMK&OtV4r%a08oZDO zFRZ}}Yw&QagO?9$aKaj#um&fr!3k?{!Wx_!4Ni>)r$&QQqv2Jf!Ku-nuhE{b(Vnl> zp0CxOuhpKf)t<-ei8)@i8k|}UpIQxGtp=}FgBQ`@MKm}O4NgRZ6Vc#AG&m6rPDFzf z(cnZiI8hC+s0J^p!Ha6}q8hxY1~00?i)!$q8oW9UUY!Q7PJ>sc!K>5Y)oJkRG(REOx>!3#0L5;418eIo9x(;e|9n|PLsL^#$qwAnX*FlZ0gBm>tHF^$e^c>Xa zIjGTdP^0IdM$bWwo`V`a2g7QA1U0%2YIGgc=sBp-b5Nt>phm|*jedhQYCi@wIu2^| z8`S7GsL^jwqu-!Lzd?lBXP@~_VM!&&`I<7&Dj)NK<2Q@kl zYIGdb=s2j+aZsb<(Q#0tzL5+@s8XX5UIu2@d z9MtGIsL^pyqvN1P$3cybgBl$NH98JzbR5*^IH=KaP^06ZM#n*oj)NK<2b1($ug-@c z-fc?!0jq@mmf*;mp~HAItX7S*+z6f<8KtN;7*eAeHHz>k#2=^)MM>6RliwO!E(re{ DlhOCh diff --git a/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff b/hbase-server/src/main/resources/hbase-webapps/static/fonts/glyphicons-halflings-regular.woff old mode 100755 new mode 100644 index 2cc3e4852a5a42e6aadd6284e067b66e14a57bc7..9e612858f802245ddcbf59788a0db942224bab35 GIT binary patch literal 23424 zcmY&eV{m0%u#Iioo_J#0nb?@vwry)-+qNe*Z>))v8{5gt_uj9!t5)^yb-JtjRGrhi zYInOUNJxNyf_yKX01)K=WP|Si>HqEj|B{eUl?MR<)%<1&{(~)D+NPwKxWqT-@~snp zg9KCz1VTZDiS?UH`PRk1VPM{29cgT9=D?!Wc_@}qzggFv;gb@2cJQAYWWtpEZ7?y@jSVqjx${B5UV@SO|wH<<0; z{><1KdVI%Ki}>~<`46C0AggwUwx-|QcU;iiZ{NZu`ur>hd*|Hb(|6veERqxu=b@5Bab=rqptGxd{QJg!4*-i_$sES~)AB46}Fjg|ea#e@?J}z%CUJ zOsLWRQR1#ng^sD)A4FDuY!iUhzlgfJh(J@BRqd&P#v2B`+saBx>m+M&q7vk-75$NH%T5pi%m z5FX?`2-5l53=a&GkC9^NZCLpN5(DMKMwwab$FDIs?q>4!!xBS}75gX_5;(luk;3Vl zLCLd5a_8`Iyz}K}+#RMwu6DVk3O_-}n>aE!4NaD*sQn`GxY?cHe!Bl9n?u&g6?aKm z-P8z&;Q3gr;h`YIxX%z^o&GZZg1=>_+hP2$$-DnL_?7?3^!WAsY4I7|@K;aL<>OTK zByfjl2PA$T83*LM9(;espx-qB%wv7H2i6CFsfAg<9V>Pj*OpwX)l?^mQfr$*OPPS$ z=`mzTYs{*(UW^ij1U8UfXjNoY7GK*+YHht(2oKE&tfZuvAyoN(;_OF>-J6AMmS5fB z^sY6wea&&${+!}@R1f$5oC-2J>J-A${@r(dRzc`wnK>a7~8{Y-scc|ETOI8 zjtNY%Y2!PI;8-@a=O}+{ap1Ewk0@T`C`q!|=KceX9gK8wtOtIC96}-^7)v23Mu;MH zhKyLGOQMujfRG$p(s`(2*nP4EH7*J57^=|%t(#PwCcW7U%e=8Jb>p6~>RAlY4a*ts=pl}_J{->@kKzxH|8XQ5{t=E zV&o`$D#ZHdv&iZWFa)(~oBh-Osl{~CS0hfM7?PyWUWsr5oYlsyC1cwULoQ4|Y5RHA2*rN+EnFPnu z`Y_&Yz*#550YJwDy@brZU>0pWV^RxRjL221@2ABq)AtA%Cz?+FG(}Yh?^v)1Lnh%D zeM{{3&-4#F9rZhS@DT0E(WRkrG!jC#5?OFjZv*xQjUP~XsaxL2rqRKvPW$zHqHr8Urp2Z)L z+)EvQeoeJ8c6A#Iy9>3lxiH3=@86uiTbnnJJJoypZ7gco_*HvKOH97B? zWiwp>+r}*Zf9b3ImxwvjL~h~j<<3shN8$k-$V1p|96I!=N6VBqmb==Bec|*;HUg?) 
z4!5#R*(#Fe)w%+RH#y{8&%%!|fQ5JcFzUE;-yVYR^&Ek55AXb{^w|@j|&G z|6C-+*On%j;W|f8mj?;679?!qY86c{(s1-PI2Wahoclf%1*8%JAvRh1(0)5Vu37Iz z`JY?RW@qKr+FMmBC{TC7k@}fv-k8t6iO}4K-i3WkF!Lc=D`nuD)v#Na zA|R*no51fkUN3^rmI;tty#IK284*2Zu!kG13!$OlxJAt@zLU`kvsazO25TpJLbK&;M8kw*0)*14kpf*)3;GiDh;C(F}$- z1;!=OBkW#ctacN=je*Pr)lnGzX=OwgNZjTpVbFxqb;8kTc@X&L2XR0A7oc!Mf2?u9 zcctQLCCr+tYipa_k=;1ETIpHt!Jeo;iy^xqBES^Ct6-+wHi%2g&)?7N^Yy zUrMIu){Jk)luDa@7We5U!$$3XFNbyRT!YPIbMKj5$IEpTX1IOtVP~(UPO2-+9ZFi6 z-$3<|{Xb#@tABt0M0s1TVCWKwveDy^S!!@4$s|DAqhsEv--Z}Dl)t%0G>U#ycJ7cy z^8%;|pg32=7~MJmqlC-x07Sd!2YX^|2D`?y;-$a!rZ3R5ia{v1QI_^>gi(HSS_e%2 zUbdg^zjMBBiLr8eSI^BqXM6HKKg#@-w`a**w(}RMe%XWl3MipvBODo*hi?+ykYq)z ziqy4goZw0@VIUY65+L7DaM5q=KWFd$;W3S!Zi>sOzpEF#(*3V-27N;^pDRoMh~(ZD zJLZXIam0lM7U#)119Hm947W)p3$%V`0Tv+*n=&ybF&}h~FA}7hEpA&1Y!BiYIb~~D z$TSo9#3ee02e^%*@4|*+=Nq6&JG5>zX4k5f?)z*#pI-G(+j|jye%13CUdcSP;rNlY z#Q!X%zHf|V)GWIcEz-=fW6AahfxI~y7w7i|PK6H@@twdgH>D_R@>&OtKl}%MuAQ7I zcpFmV^~w~8$4@zzh~P~+?B~%L@EM3x(^KXJSgc6I=;)B6 zpRco2LKIlURPE*XUmZ^|1vb?w*ZfF}EXvY13I4af+()bAI5V?BRbFp`Sb{8GRJHd* z4S2s%4A)6Uc=PK%4@PbJ<{1R6+2THMk0c+kif**#ZGE)w6WsqH z`r^DL&r8|OEAumm^qyrryd(HQ9olv$ltnVGB{aY?_76Uk%6p;e)2DTvF(;t=Q+|8b zqfT(u5@BP);6;jmRAEV057E*2d^wx@*aL1GqWU|$6h5%O@cQtVtC^isd%gD7PZ_Io z_BDP5w(2*)Mu&JxS@X%%ByH_@+l>y07jIc~!@;Raw)q_;9oy@*U#mCnc7%t85qa4? z%_Vr5tkN^}(^>`EFhag;!MpRh!&bKnveQZAJ4)gEJo1@wHtT$Gs6IpznN$Lk-$NcM z3ReVC&qcXvfGX$I0nfkS$a|Pm%x+lq{WweNc;K>a1M@EAVWs2IBcQPiEJNt}+Ea8~WiapASoMvo(&PdUO}AfC~>ZGzqWjd)4no( ziLi#e3lOU~sI*XPH&n&J0cWfoh*}eWEEZW%vX?YK!$?w}htY|GALx3;YZoo=JCF4@ zdiaA-uq!*L5;Yg)z-_`MciiIwDAAR3-snC4V+KA>&V%Ak;p{1u>{Lw$NFj)Yn0Ms2*kxUZ)OTddbiJM}PK!DM}Ot zczn?EZXhx3wyu6i{QMz_Ht%b?K&-@5r;8b076YDir`KXF0&2i9NQ~#JYaq*}Ylb}^ z<{{6xy&;dQ;|@k_(31PDr!}}W$zF7Jv@f%um0M$#=8ygpu%j(VU-d5JtQwT714#f0z+Cm$F9JjGr_G!~NS@L9P;C1? z;Ij2YVYuv}tzU+HugU=f9b1Wbx3418+xj$RKD;$gf$0j_A&c;-OhoF*z@DhEW@d9o zbQBjqEQnn2aG?N9{bmD^A#Um6SDKsm0g{g_<4^dJjg_l_HXdDMk!p`oFv8+@_v_9> zq;#WkQ!GNGfLT7f8m60H@$tu?p;o_It#TApmE`xnZr|_|cb3XXE)N^buLE`9R=Qbg zXJu}6r07me2HU<)S7m?@GzrQDTE3UH?FXM7V+-lT#l}P(U>Fvnyw8T7RTeP`R579m zj=Y>qDw1h-;|mX-)cSXCc$?hr;43LQt)7z$1QG^pyclQ1Bd!jbzsVEgIg~u9b38;> zfsRa%U`l%did6HzPRd;TK{_EW;n^Ivp-%pu0%9G-z@Au{Ry+EqEcqW=z-#6;-!{WA z;l+xC6Zke>dl+(R1q7B^Hu~HmrG~Kt575mzve>x*cL-shl+zqp6yuGX)DDGm`cid! 
[GIT binary patch: base85-encoded binary data omitted]
literal 16448
[GIT binary patch: base85-encoded binary data omitted]
ztc`{$8DhB9)&?~(m&EUc=LzI1=qo#zjy#2{hLT_*aj<618qQ7mD#k2ZFGou&69;=2 z1j7=Su8k}{L*h&mfs7jg^PN&9C1Z@U!p6gXk&-7xM~{X`nqH#aGO`;Xy_zbz^rYacIq0AH%4!Oh93TzJ820%ur)8OyeS@K?sF1V(iFO z37Nnqj1z#1{|v7=_CX`lQA|$<1gtuNMHGNJYp1D_k;WQk-b+T6VmUK(x=bWviOZ~T z|4e%SpuaWLWD?qN2%`S*`P;BQBw(B__wTD6epvGdJ+>DBq2oVlf&F*lz+#avb4)3P1c^Mf#olQheVvZ|Z5 z>xXfgmv!5Z^SYn+_x}K5B%G^sRwiez&z9|f!E!#oJlT2kCOV0000$L_|bHBqAarB4TD{W@grX1CUr72@caw0faEd7-K|4L_|cawbojjHdpd6 zI6~Iv5J?-Q4*&oF000000FV;^004t70Z6Qk1Xl{X9oJ{sRC2(cs?- literal 0 HcmV?d00001 diff --git a/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js b/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js deleted file mode 100755 index 2c64257146c4..000000000000 --- a/hbase-server/src/main/resources/hbase-webapps/static/js/bootstrap.js +++ /dev/null @@ -1,1999 +0,0 @@ -/** -* bootstrap.js v3.0.0 by @fat and @mdo -* Copyright 2013 Twitter Inc. -* http://www.apache.org/licenses/LICENSE-2.0 -*/ -if (!jQuery) { throw new Error("Bootstrap requires jQuery") } - -/* ======================================================================== - * Bootstrap: transition.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#transitions - * ======================================================================== - * Copyright 2013 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/) - // ============================================================ - - function transitionEnd() { - var el = document.createElement('bootstrap') - - var transEndEventNames = { - 'WebkitTransition' : 'webkitTransitionEnd' - , 'MozTransition' : 'transitionend' - , 'OTransition' : 'oTransitionEnd otransitionend' - , 'transition' : 'transitionend' - } - - for (var name in transEndEventNames) { - if (el.style[name] !== undefined) { - return { end: transEndEventNames[name] } - } - } - } - - // http://blog.alexmaccaw.com/css-transitions - $.fn.emulateTransitionEnd = function (duration) { - var called = false, $el = this - $(this).one($.support.transition.end, function () { called = true }) - var callback = function () { if (!called) $($el).trigger($.support.transition.end) } - setTimeout(callback, duration) - return this - } - - $(function () { - $.support.transition = transitionEnd() - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: alert.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#alerts - * ======================================================================== - * Copyright 2013 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // ALERT CLASS DEFINITION - // ====================== - - var dismiss = '[data-dismiss="alert"]' - var Alert = function (el) { - $(el).on('click', dismiss, this.close) - } - - Alert.prototype.close = function (e) { - var $this = $(this) - var selector = $this.attr('data-target') - - if (!selector) { - selector = $this.attr('href') - selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7 - } - - var $parent = $(selector) - - if (e) e.preventDefault() - - if (!$parent.length) { - $parent = $this.hasClass('alert') ? $this : $this.parent() - } - - $parent.trigger(e = $.Event('close.bs.alert')) - - if (e.isDefaultPrevented()) return - - $parent.removeClass('in') - - function removeElement() { - $parent.trigger('closed.bs.alert').remove() - } - - $.support.transition && $parent.hasClass('fade') ? - $parent - .one($.support.transition.end, removeElement) - .emulateTransitionEnd(150) : - removeElement() - } - - - // ALERT PLUGIN DEFINITION - // ======================= - - var old = $.fn.alert - - $.fn.alert = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.alert') - - if (!data) $this.data('bs.alert', (data = new Alert(this))) - if (typeof option == 'string') data[option].call($this) - }) - } - - $.fn.alert.Constructor = Alert - - - // ALERT NO CONFLICT - // ================= - - $.fn.alert.noConflict = function () { - $.fn.alert = old - return this - } - - - // ALERT DATA-API - // ============== - - $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: button.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#buttons - * ======================================================================== - * Copyright 2013 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // BUTTON PUBLIC CLASS DEFINITION - // ============================== - - var Button = function (element, options) { - this.$element = $(element) - this.options = $.extend({}, Button.DEFAULTS, options) - } - - Button.DEFAULTS = { - loadingText: 'loading...' - } - - Button.prototype.setState = function (state) { - var d = 'disabled' - var $el = this.$element - var val = $el.is('input') ? 
'val' : 'html' - var data = $el.data() - - state = state + 'Text' - - if (!data.resetText) $el.data('resetText', $el[val]()) - - $el[val](data[state] || this.options[state]) - - // push to event loop to allow forms to submit - setTimeout(function () { - state == 'loadingText' ? - $el.addClass(d).attr(d, d) : - $el.removeClass(d).removeAttr(d); - }, 0) - } - - Button.prototype.toggle = function () { - var $parent = this.$element.closest('[data-toggle="buttons"]') - - if ($parent.length) { - var $input = this.$element.find('input') - .prop('checked', !this.$element.hasClass('active')) - .trigger('change') - if ($input.prop('type') === 'radio') $parent.find('.active').removeClass('active') - } - - this.$element.toggleClass('active') - } - - - // BUTTON PLUGIN DEFINITION - // ======================== - - var old = $.fn.button - - $.fn.button = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.button') - var options = typeof option == 'object' && option - - if (!data) $this.data('bs.button', (data = new Button(this, options))) - - if (option == 'toggle') data.toggle() - else if (option) data.setState(option) - }) - } - - $.fn.button.Constructor = Button - - - // BUTTON NO CONFLICT - // ================== - - $.fn.button.noConflict = function () { - $.fn.button = old - return this - } - - - // BUTTON DATA-API - // =============== - - $(document).on('click.bs.button.data-api', '[data-toggle^=button]', function (e) { - var $btn = $(e.target) - if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn') - $btn.button('toggle') - e.preventDefault() - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: carousel.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#carousel - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * ======================================================================== */ - - -+function ($) { "use strict"; - - // CAROUSEL CLASS DEFINITION - // ========================= - - var Carousel = function (element, options) { - this.$element = $(element) - this.$indicators = this.$element.find('.carousel-indicators') - this.options = options - this.paused = - this.sliding = - this.interval = - this.$active = - this.$items = null - - this.options.pause == 'hover' && this.$element - .on('mouseenter', $.proxy(this.pause, this)) - .on('mouseleave', $.proxy(this.cycle, this)) - } - - Carousel.DEFAULTS = { - interval: 5000 - , pause: 'hover' - , wrap: true - } - - Carousel.prototype.cycle = function (e) { - e || (this.paused = false) - - this.interval && clearInterval(this.interval) - - this.options.interval - && !this.paused - && (this.interval = setInterval($.proxy(this.next, this), this.options.interval)) - - return this - } - - Carousel.prototype.getActiveIndex = function () { - this.$active = this.$element.find('.item.active') - this.$items = this.$active.parent().children() - - return this.$items.index(this.$active) - } - - Carousel.prototype.to = function (pos) { - var that = this - var activeIndex = this.getActiveIndex() - - if (pos > (this.$items.length - 1) || pos < 0) return - - if (this.sliding) return this.$element.one('slid', function () { that.to(pos) }) - if (activeIndex == pos) return this.pause().cycle() - - return this.slide(pos > activeIndex ? 'next' : 'prev', $(this.$items[pos])) - } - - Carousel.prototype.pause = function (e) { - e || (this.paused = true) - - if (this.$element.find('.next, .prev').length && $.support.transition.end) { - this.$element.trigger($.support.transition.end) - this.cycle(true) - } - - this.interval = clearInterval(this.interval) - - return this - } - - Carousel.prototype.next = function () { - if (this.sliding) return - return this.slide('next') - } - - Carousel.prototype.prev = function () { - if (this.sliding) return - return this.slide('prev') - } - - Carousel.prototype.slide = function (type, next) { - var $active = this.$element.find('.item.active') - var $next = next || $active[type]() - var isCycling = this.interval - var direction = type == 'next' ? 'left' : 'right' - var fallback = type == 'next' ? 
'first' : 'last' - var that = this - - if (!$next.length) { - if (!this.options.wrap) return - $next = this.$element.find('.item')[fallback]() - } - - this.sliding = true - - isCycling && this.pause() - - var e = $.Event('slide.bs.carousel', { relatedTarget: $next[0], direction: direction }) - - if ($next.hasClass('active')) return - - if (this.$indicators.length) { - this.$indicators.find('.active').removeClass('active') - this.$element.one('slid', function () { - var $nextIndicator = $(that.$indicators.children()[that.getActiveIndex()]) - $nextIndicator && $nextIndicator.addClass('active') - }) - } - - if ($.support.transition && this.$element.hasClass('slide')) { - this.$element.trigger(e) - if (e.isDefaultPrevented()) return - $next.addClass(type) - $next[0].offsetWidth // force reflow - $active.addClass(direction) - $next.addClass(direction) - $active - .one($.support.transition.end, function () { - $next.removeClass([type, direction].join(' ')).addClass('active') - $active.removeClass(['active', direction].join(' ')) - that.sliding = false - setTimeout(function () { that.$element.trigger('slid') }, 0) - }) - .emulateTransitionEnd(600) - } else { - this.$element.trigger(e) - if (e.isDefaultPrevented()) return - $active.removeClass('active') - $next.addClass('active') - this.sliding = false - this.$element.trigger('slid') - } - - isCycling && this.cycle() - - return this - } - - - // CAROUSEL PLUGIN DEFINITION - // ========================== - - var old = $.fn.carousel - - $.fn.carousel = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.carousel') - var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option) - var action = typeof option == 'string' ? option : options.slide - - if (!data) $this.data('bs.carousel', (data = new Carousel(this, options))) - if (typeof option == 'number') data.to(option) - else if (action) data[action]() - else if (options.interval) data.pause().cycle() - }) - } - - $.fn.carousel.Constructor = Carousel - - - // CAROUSEL NO CONFLICT - // ==================== - - $.fn.carousel.noConflict = function () { - $.fn.carousel = old - return this - } - - - // CAROUSEL DATA-API - // ================= - - $(document).on('click.bs.carousel.data-api', '[data-slide], [data-slide-to]', function (e) { - var $this = $(this), href - var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7 - var options = $.extend({}, $target.data(), $this.data()) - var slideIndex = $this.attr('data-slide-to') - if (slideIndex) options.interval = false - - $target.carousel(options) - - if (slideIndex = $this.attr('data-slide-to')) { - $target.data('bs.carousel').to(slideIndex) - } - - e.preventDefault() - }) - - $(window).on('load', function () { - $('[data-ride="carousel"]').each(function () { - var $carousel = $(this) - $carousel.carousel($carousel.data()) - }) - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: collapse.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#collapse - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // COLLAPSE PUBLIC CLASS DEFINITION - // ================================ - - var Collapse = function (element, options) { - this.$element = $(element) - this.options = $.extend({}, Collapse.DEFAULTS, options) - this.transitioning = null - - if (this.options.parent) this.$parent = $(this.options.parent) - if (this.options.toggle) this.toggle() - } - - Collapse.DEFAULTS = { - toggle: true - } - - Collapse.prototype.dimension = function () { - var hasWidth = this.$element.hasClass('width') - return hasWidth ? 'width' : 'height' - } - - Collapse.prototype.show = function () { - if (this.transitioning || this.$element.hasClass('in')) return - - var startEvent = $.Event('show.bs.collapse') - this.$element.trigger(startEvent) - if (startEvent.isDefaultPrevented()) return - - var actives = this.$parent && this.$parent.find('> .panel > .in') - - if (actives && actives.length) { - var hasData = actives.data('bs.collapse') - if (hasData && hasData.transitioning) return - actives.collapse('hide') - hasData || actives.data('bs.collapse', null) - } - - var dimension = this.dimension() - - this.$element - .removeClass('collapse') - .addClass('collapsing') - [dimension](0) - - this.transitioning = 1 - - var complete = function () { - this.$element - .removeClass('collapsing') - .addClass('in') - [dimension]('auto') - this.transitioning = 0 - this.$element.trigger('shown.bs.collapse') - } - - if (!$.support.transition) return complete.call(this) - - var scrollSize = $.camelCase(['scroll', dimension].join('-')) - - this.$element - .one($.support.transition.end, $.proxy(complete, this)) - .emulateTransitionEnd(350) - [dimension](this.$element[0][scrollSize]) - } - - Collapse.prototype.hide = function () { - if (this.transitioning || !this.$element.hasClass('in')) return - - var startEvent = $.Event('hide.bs.collapse') - this.$element.trigger(startEvent) - if (startEvent.isDefaultPrevented()) return - - var dimension = this.dimension() - - this.$element - [dimension](this.$element[dimension]()) - [0].offsetHeight - - this.$element - .addClass('collapsing') - .removeClass('collapse') - .removeClass('in') - - this.transitioning = 1 - - var complete = function () { - this.transitioning = 0 - this.$element - .trigger('hidden.bs.collapse') - .removeClass('collapsing') - .addClass('collapse') - } - - if (!$.support.transition) return complete.call(this) - - this.$element - [dimension](0) - .one($.support.transition.end, $.proxy(complete, this)) - .emulateTransitionEnd(350) - } - - Collapse.prototype.toggle = function () { - this[this.$element.hasClass('in') ? 
'hide' : 'show']() - } - - - // COLLAPSE PLUGIN DEFINITION - // ========================== - - var old = $.fn.collapse - - $.fn.collapse = function (option) { - return this.each(function () { - var $this = $(this) - var data = $this.data('bs.collapse') - var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option) - - if (!data) $this.data('bs.collapse', (data = new Collapse(this, options))) - if (typeof option == 'string') data[option]() - }) - } - - $.fn.collapse.Constructor = Collapse - - - // COLLAPSE NO CONFLICT - // ==================== - - $.fn.collapse.noConflict = function () { - $.fn.collapse = old - return this - } - - - // COLLAPSE DATA-API - // ================= - - $(document).on('click.bs.collapse.data-api', '[data-toggle=collapse]', function (e) { - var $this = $(this), href - var target = $this.attr('data-target') - || e.preventDefault() - || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7 - var $target = $(target) - var data = $target.data('bs.collapse') - var option = data ? 'toggle' : $this.data() - var parent = $this.attr('data-parent') - var $parent = parent && $(parent) - - if (!data || !data.transitioning) { - if ($parent) $parent.find('[data-toggle=collapse][data-parent="' + parent + '"]').not($this).addClass('collapsed') - $this[$target.hasClass('in') ? 'addClass' : 'removeClass']('collapsed') - } - - $target.collapse(option) - }) - -}(window.jQuery); - -/* ======================================================================== - * Bootstrap: dropdown.js v3.0.0 - * http://twbs.github.com/bootstrap/javascript.html#dropdowns - * ======================================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ======================================================================== */ - - -+function ($) { "use strict"; - - // DROPDOWN CLASS DEFINITION - // ========================= - - var backdrop = '.dropdown-backdrop' - var toggle = '[data-toggle=dropdown]' - var Dropdown = function (element) { - var $el = $(element).on('click.bs.dropdown', this.toggle) - } - - Dropdown.prototype.toggle = function (e) { - var $this = $(this) - - if ($this.is('.disabled, :disabled')) return - - var $parent = getParent($this) - var isActive = $parent.hasClass('open') - - clearMenus() - - if (!isActive) { - if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) { - // if mobile we we use a backdrop because click events don't delegate - $('
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index d95a35904dc7..602122db4a31 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -499,11 +499,12 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
       if (!ritTotalNum || ritTotalNum < 1) {
         return;
       }
+      var ritPerPage = parseInt($("#rit_per_page").val());
       $("#rit_pagination").sPage({
         page:1,
         total:ritTotalNum,
-        pageSize:5,
+        pageSize:ritPerPage,
         noData: false,
         showPN:true,
         prevPage:"prev",

From bc9e0723ab13e1b5be58e945529a0bb908e66037 Mon Sep 17 00:00:00 2001
From: bsglz <18031031@qq.com>
Date: Fri, 25 Sep 2020 19:24:08 +0800
Subject: [PATCH 390/769] Add Zheng Wang to developers list. (#2457)

---
 pom.xml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pom.xml b/pom.xml
index 06631c30d6e2..088a7e4898e4 100755
--- a/pom.xml
+++ b/pom.xml
@@ -674,6 +674,12 @@
       <email>meiyi@apache.org</email>
       <timezone>+8</timezone>
     </developer>
+    <developer>
+      <id>wangzheng</id>
+      <name>Zheng (bsglz) Wang</name>
+      <email>wangzheng@apache.org</email>
+      <timezone>+8</timezone>
+    </developer>
   </developers>

From 562b9e0252eb007a23f1b0520d32a420fc4f081d Mon Sep 17 00:00:00 2001
From: Nick Dimiduk <ndimiduk@apache.org>
Date: Tue, 22 Sep 2020 14:48:45 -0700
Subject: [PATCH 391/769] HBASE-25085 Add support for java properties to
 hbase-vote.sh

Signed-off-by: Viraj Jasani
Signed-off-by: Jan Hentschel
Signed-off-by: Tak-Lon (Stephen) Wu
---
 dev-support/hbase-vote.sh                  | 39 ++++++++++++----------
 src/main/asciidoc/_chapters/developer.adoc |  4 ++-
 2 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/dev-support/hbase-vote.sh b/dev-support/hbase-vote.sh
index 88e22849a92f..d608f1e5e4a4 100755
--- a/dev-support/hbase-vote.sh
+++ b/dev-support/hbase-vote.sh
@@ -29,7 +29,7 @@ hbase-vote. A script for standard vote which verifies the following items
 4. Built from source
 5. Unit tests

-Usage: ${SCRIPT} -s | --source <url> [-k | --key <signature>] [-f | --keys-file-url <url>] [-o | --output-dir </path/to/use>] [-P runSmallTests]
+Usage: ${SCRIPT} -s | --source <url> [-k | --key <signature>] [-f | --keys-file-url <url>] [-o | --output-dir </path/to/use>] [-P runSmallTests] [-D property[=value]]
        ${SCRIPT} -h | --help

   -h | --help                   Show this screen.
   -s | --source '<url>'         A URL pointing to the release candidate sources and binaries
                                 e.g. https://dist.apache.org/repos/dist/dev/hbase/hbase-<version>RC0/
   -k | --key '<signature>'      A signature of the public key, e.g. 9AD2AE49
   -f | --keys-file-url '<url>'  the URL of the key file, default is
                                 https://downloads.apache.org/hbase/KEYS
   -o | --output-dir '</path>'   directory which has the stdout and stderr of each verification target
   -P |                          list of maven profiles to activate for test UT/IT, i.e. <-P runSmallTests> Defaults to runAllTests
+  -D |                          list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0> Defaults to unset
 __EOF
 }

 while ((${#})); do
   case "${1}" in
     -h | --help )
-      usage; exit 0 ;;
-    -s | --source )
-      SOURCE_URL="${2}"; shift 2 ;;
-    -k | --key )
-      SIGNING_KEY="${2}"; shift 2 ;;
+      usage; exit 0 ;;
+    -s | --source )
+      SOURCE_URL="${2}"; shift 2 ;;
+    -k | --key )
+      SIGNING_KEY="${2}"; shift 2 ;;
     -f | --keys-file-url )
-      KEY_FILE_URL="${2}"; shift 2 ;;
+      KEY_FILE_URL="${2}"; shift 2 ;;
     -o | --output-dir )
-      OUTPUT_DIR="${2}"; shift 2 ;;
+      OUTPUT_DIR="${2}"; shift 2 ;;
     -P )
-      MVN_ARGS="-P ${2}"; shift 2 ;;
-    * )
+      MVN_PROFILES="-P ${2}"; shift 2 ;;
+    -D )
+      MVN_PROPERTIES="-D ${2}"; shift 2 ;;
+    * )
       usage >&2; exit 1 ;;
   esac
 done
@@ -89,8 +92,8 @@ if [ !
-d "${OUTPUT_DIR}" ]; then fi # Maven profile must be provided -if [ -z "${MVN_ARGS}" ]; then - MVN_ARGS="-P runAllTests" +if [ -z "${MVN_PROFILES}" ]; then + MVN_PROFILES="-P runAllTests" fi OUTPUT_PATH_PREFIX="${OUTPUT_DIR}"/"${HBASE_RC_VERSION}" @@ -142,17 +145,17 @@ function unzip_from_source() { function rat_test() { rm -f "${OUTPUT_PATH_PREFIX}"_rat_test - mvn clean apache-rat:check 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 + mvn clean apache-rat:check "${MVN_PROPERTIES}" 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 } function build_from_source() { rm -f "${OUTPUT_PATH_PREFIX}"_build_from_source - mvn clean install -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 + mvn clean install "${MVN_PROPERTIES}" -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 } function run_tests() { rm -f "${OUTPUT_PATH_PREFIX}"_run_tests - mvn package "${MVN_ARGS}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 + mvn package "${MVN_PROFILES}" "${MVN_PROPERTIES}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 } function execute() { @@ -164,11 +167,11 @@ function print_when_exit() { * Signature: $( ((SIGNATURE_PASSED)) && echo "ok" || echo "failed" ) * Checksum : $( ((CHECKSUM_PASSED)) && echo "ok" || echo "failed" ) * Rat check (${JAVA_VERSION}): $( ((RAT_CHECK_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean apache-rat:check + - mvn clean apache-rat:check "${MVN_PROPERTIES}" * Built from source (${JAVA_VERSION}): $( ((BUILD_FROM_SOURCE_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean install -DskipTests + - mvn clean install -DskipTests "${MVN_PROPERTIES}" * Unit tests pass (${JAVA_VERSION}): $( ((UNIT_TEST_PASSED)) && echo "ok" || echo "failed" ) - - mvn package ${MVN_ARGS} + - mvn package ${MVN_PROFILES} "${MVN_PROPERTIES}" -Dsurefire.rerunFailingTestsCount=3 __EOF if ((CHECKSUM_PASSED)) && ((SIGNATURE_PASSED)) && ((RAT_CHECK_PASSED)) && ((BUILD_FROM_SOURCE_PASSED)) && ((UNIT_TEST_PASSED)) ; then exit 0 diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index 27c369255459..3e2e3938f459 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -1152,7 +1152,7 @@ hbase-vote. A script for standard vote which verifies the following items 4. Built from source 5. Unit tests -Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] +Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] [-P runSmallTests] [-D property[=value]] hbase-vote.sh -h | --help -h | --help Show this screen. @@ -1162,6 +1162,8 @@ Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-f -f | --keys-file-url '' the URL of the key file, default is https://downloads.apache.org/hbase/KEYS -o | --output-dir '' directory which has the stdout and stderr of each verification target + -P | list of maven profiles to activate for test UT/IT, i.e. <-P runSmallTests> Defaults to runAllTests + -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0> Defaults to unset ---- * If you see any unit test failures, please call out the solo test result and whether it's part of flaky (nightly) tests dashboard, e.g. 
link:https://builds.apache.org/view/H-L/view/HBase/job/HBase-Find-Flaky-Tests/job/master/lastSuccessfulBuild/artifact/dashboard.html[dashboard of master branch] (please change the test branch accordingly). From eb8d5353dc08a69c70b684fea4548ed1f7ea7de5 Mon Sep 17 00:00:00 2001 From: Joseph295 <517536891@qq.com> Date: Sat, 26 Sep 2020 16:55:54 +0800 Subject: [PATCH 392/769] HBASE-25088 CatalogFamilyFormat/MetaTableAccessor.parseRegionInfoFromRegionName incorrectly setEndKey to regionId (#2448) Signed-off-by: Jan Hentschel Signed-off-by: Duo Zhang --- .../hadoop/hbase/CatalogFamilyFormat.java | 2 +- .../hadoop/hbase/TestCatalogFamilyFormat.java | 32 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java index a2297b66ab32..d0ee3dc83326 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java @@ -101,7 +101,7 @@ public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws long regionId = Long.parseLong(Bytes.toString(fields[2])); int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0; return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1]) - .setEndKey(fields[2]).setSplit(false).setRegionId(regionId).setReplicaId(replicaId).build(); + .setRegionId(regionId).setReplicaId(replicaId).build(); } /** diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java index 78e0fdba3016..628655a083c2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java @@ -19,13 +19,19 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.io.IOException; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.ClassRule; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; @Category({ ClientTests.class, SmallTests.class }) public class TestCatalogFamilyFormat { @@ -34,6 +40,9 @@ public class TestCatalogFamilyFormat { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCatalogFamilyFormat.class); + @Rule + public TestName name = new TestName(); + @Test public void testParseReplicaIdFromServerColumn() { String column1 = HConstants.SERVER_QUALIFIER_STR; @@ -70,4 +79,27 @@ public void testMetaReaderGetColumnMethods() { HConstants.SEQNUM_QUALIFIER_STR + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"), CatalogFamilyFormat.getSeqNumColumn(42)); } + + /** + * The info we can get from the regionName is: table name, start key, regionId, replicaId. 
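+   * A region name is laid out roughly as
+   * <table>,<start key>,<region id>[_<hex replica id>].<encoded name>.
+   * (a sketch of the format, matching the field parsing in
+   * CatalogFamilyFormat.parseRegionInfoFromRegionName above), so the end key
+   * is never encoded in the name and cannot be recovered from it; the
+   * replica id simply defaults to 0 when the suffix is absent.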
+ */ + @Test + public void testParseRegionInfoFromRegionName() throws IOException { + RegionInfo originalRegionInfo = RegionInfoBuilder.newBuilder( + TableName.valueOf(name.getMethodName())).setRegionId(999999L) + .setStartKey(Bytes.toBytes("2")).setEndKey(Bytes.toBytes("3")) + .setReplicaId(1).build(); + RegionInfo newParsedRegionInfo = CatalogFamilyFormat + .parseRegionInfoFromRegionName(originalRegionInfo.getRegionName()); + assertEquals("Parse TableName error", originalRegionInfo.getTable(), + newParsedRegionInfo.getTable()); + assertEquals("Parse regionId error", originalRegionInfo.getRegionId(), + newParsedRegionInfo.getRegionId()); + assertTrue("Parse startKey error", Bytes.equals(originalRegionInfo.getStartKey(), + newParsedRegionInfo.getStartKey())); + assertEquals("Parse replicaId error", originalRegionInfo.getReplicaId(), + newParsedRegionInfo.getReplicaId()); + assertTrue("We can't parse endKey from regionName only", + Bytes.equals(HConstants.EMPTY_END_ROW, newParsedRegionInfo.getEndKey())); + } } From 1c4e55fe340bfa5f56962259923b9e841af215d6 Mon Sep 17 00:00:00 2001 From: XinSun Date: Sat, 26 Sep 2020 19:49:02 +0800 Subject: [PATCH 393/769] HBASE-25098 ReplicationStatisticsChore runs in wrong time unit (#2460) Signed-off-by: Viraj Jasani Signed-off-by: Guanghao Zhang --- .../hbase/replication/ReplicationSinkServiceImpl.java | 8 +++++--- .../hbase/replication/regionserver/Replication.java | 9 ++++++--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java index 9b0e3f79fe07..91dd8d08e675 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -51,7 +52,7 @@ public class ReplicationSinkServiceImpl implements ReplicationSinkService { // ReplicationLoad to access replication metrics private ReplicationLoad replicationLoad; - private int statsPeriod; + private int statsPeriodInSecond; @Override public void replicateLogEntries(List entries, CellScanner cells, @@ -66,7 +67,7 @@ public void initialize(Server server, FileSystem fs, Path logdir, Path oldLogDir WALProvider walProvider) throws IOException { this.server = server; this.conf = server.getConfiguration(); - this.statsPeriod = + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); } @@ -75,7 +76,8 @@ public void initialize(Server server, FileSystem fs, Path logdir, Path oldLogDir public void startReplicationService() throws IOException { this.replicationSink = new ReplicationSink(this.conf); this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSinkStatistics", server, statsPeriod)); + new ReplicationStatisticsChore("ReplicationSinkStatistics", server, + (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 
33975edb5909..9be7b9a1e4c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -22,6 +22,8 @@ import java.util.List; import java.util.OptionalLong; import java.util.UUID; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -64,7 +66,7 @@ public class Replication implements ReplicationSourceService { private SyncReplicationPeerInfoProvider syncReplicationPeerInfoProvider; // Hosting server private Server server; - private int statsPeriod; + private int statsPeriodInSecond; // ReplicationLoad to access replication metrics private ReplicationLoad replicationLoad; private MetricsReplicationGlobalSourceSource globalMetricsSource; @@ -139,7 +141,7 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0)); } } - this.statsPeriod = + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); @@ -169,7 +171,8 @@ public void stopReplicationService() { public void startReplicationService() throws IOException { this.replicationManager.init(); this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSourceStatistics", server, statsPeriod)); + new ReplicationStatisticsChore("ReplicationSourceStatistics", server, + (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); LOG.info("{} started", this.server.toString()); } From bf2746f0e434339b780e9abd808519d73eab81dc Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sat, 26 Sep 2020 20:16:48 +0530 Subject: [PATCH 394/769] HBASE-25045 : Add 2.3.2 to the downloads page Closes #2461 Signed-off-by: Guanghao Zhang --- src/site/xdoc/downloads.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index c49f09a9177e..37ea25af5e75 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -45,24 +45,24 @@ under the License. - 2.3.1 + 2.3.2 - 2020/08/19 + 2020/09/25 - 2.3.0 vs 2.3.1 + 2.3.1 vs 2.3.2 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
- client-bin (sha512 asc)
+ src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc) From e99ff2e591753c57d6180ee74c05a9ef02a079bc Mon Sep 17 00:00:00 2001 From: Mohammad Arshad Date: Sun, 27 Sep 2020 11:35:38 +0530 Subject: [PATCH 395/769] HBASE-25077: hbck.jsp page loading fails, logs NPE in master log. (#2433) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani Signed-off-by: Pankaj Kumar --- .../main/java/org/apache/hadoop/hbase/master/HbckChore.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java index 8b886c8a4598..0973d037c8a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java @@ -255,10 +255,10 @@ private void loadRegionsFromRSReport() { for (Map.Entry entry : regionInfoMap.entrySet()) { HbckRegionInfo hri = entry.getValue(); ServerName locationInMeta = hri.getMetaEntry().getRegionServer(); + if (locationInMeta == null) { + continue; + } if (hri.getDeployedOn().size() == 0) { - if (locationInMeta == null) { - continue; - } // skip the offline region which belong to disabled table. if (disabledTableRegions.contains(hri.getRegionNameAsString())) { continue; From e2afea0390d1a9e5db0d59d393447597b05ff1c3 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Mon, 28 Sep 2020 14:47:18 +0900 Subject: [PATCH 396/769] HBASE-25096 WAL size in RegionServer UI is wrong (#2456) Signed-off-by: Guanghao Zhang --- .../hbase/regionserver/MetricsRegionServerWrapperImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index c4328c410da4..8ce2baaef4d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -856,7 +856,7 @@ synchronized public void run() { numWALFiles = (provider == null ? 0 : provider.getNumLogFiles()) + (metaProvider == null ? 0 : metaProvider.getNumLogFiles()); walFileSize = (provider == null ? 0 : provider.getLogFileSize()) + - (provider == null ? 0 : provider.getLogFileSize()); + (metaProvider == null ? 0 : metaProvider.getLogFileSize()); // Copy over computed values so that no thread sees half computed values. 
numStores = tempNumStores; numStoreFiles = tempNumStoreFiles; From 1b99e9ce4266d1bb67d3b9ffdf8d2b2bbb621796 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 28 Sep 2020 12:52:00 +0530 Subject: [PATCH 397/769] HBASE-25070 : With new generic API getLogEntries, cleaning up unused RPC APIs Closes #2426 Signed-off-by: Guanghao Zhang --- .../org/apache/hadoop/hbase/client/Admin.java | 4 +-- .../hadoop/hbase/client/AsyncAdmin.java | 4 +-- .../main/protobuf/server/region/Admin.proto | 6 ----- .../hbase/regionserver/RSRpcServices.java | 27 +------------------ .../hadoop/hbase/master/MockRegionServer.java | 14 ---------- 5 files changed, 5 insertions(+), 50 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 75d55cf17839..370ab6408254 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2512,8 +2512,8 @@ Pair, List> getConfiguredNamespacesAndTablesInRSGroup(St * Examples include slow/large RPC logs, balancer decisions by master. * * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. + * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. * @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index c2d7e8a07829..2ed624ca01f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1714,8 +1714,8 @@ default CompletableFuture> getSlowLogResponses( * Examples include slow/large RPC logs, balancer decisions by master. * * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. + * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. 
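+   * <p>
+   * A sketch of a typical call against this API (the "SLOW_LOG" log type, the
+   * limit of 10 and the null filter-params map are illustrative values, not
+   * the only valid ones):
+   * <pre>
+   * admin.getLogEntries(serverNames, "SLOW_LOG", ServerType.REGION_SERVER, 10, null);
+   * </pre>
+   * which would fetch up to ten slow-RPC records from each selected server.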
* @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index ca0c93209ec9..0667292917ae 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -393,12 +393,6 @@ service AdminService { rpc ExecuteProcedures(ExecuteProceduresRequest) returns(ExecuteProceduresResponse); - rpc GetSlowLogResponses(SlowLogResponseRequest) - returns(SlowLogResponses); - - rpc GetLargeLogResponses(SlowLogResponseRequest) - returns(SlowLogResponses); - rpc ClearSlowLogsResponses(ClearSlowLogResponseRequest) returns(ClearSlowLogResponses); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index be64966570f1..a59f5e609b17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3851,19 +3851,6 @@ public ExecuteProceduresResponse executeProcedures(RpcController controller, } } - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public SlowLogResponses getSlowLogResponses(final RpcController controller, - final SlowLogResponseRequest request) { - final NamedQueueRecorder namedQueueRecorder = - this.regionServer.getNamedQueueRecorder(); - final List slowLogPayloads = getSlowLogPayloads(request, namedQueueRecorder); - SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder() - .addAllSlowLogPayloads(slowLogPayloads) - .build(); - return slowLogResponses; - } - private List getSlowLogPayloads(SlowLogResponseRequest request, NamedQueueRecorder namedQueueRecorder) { if (namedQueueRecorder == null) { @@ -3881,19 +3868,6 @@ private List getSlowLogPayloads(SlowLogResponseRequest request, return slowLogPayloads; } - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public SlowLogResponses getLargeLogResponses(final RpcController controller, - final SlowLogResponseRequest request) { - final NamedQueueRecorder namedQueueRecorder = - this.regionServer.getNamedQueueRecorder(); - final List slowLogPayloads = getSlowLogPayloads(request, namedQueueRecorder); - SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder() - .addAllSlowLogPayloads(slowLogPayloads) - .build(); - return slowLogResponses; - } - @Override @QosPriority(priority = HConstants.ADMIN_QOS) public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controller, @@ -3911,6 +3885,7 @@ public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controll } @Override + @QosPriority(priority = HConstants.ADMIN_QOS) public HBaseProtos.LogEntry getLogEntries(RpcController controller, HBaseProtos.LogRequest request) throws ServiceException { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 9a7135ce1c02..69a7a79644e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -114,8 +114,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponseRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponses; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; @@ -679,18 +677,6 @@ public ExecuteProceduresResponse executeProcedures(RpcController controller, return null; } - @Override - public SlowLogResponses getSlowLogResponses(RpcController controller, - SlowLogResponseRequest request) throws ServiceException { - return null; - } - - @Override - public SlowLogResponses getLargeLogResponses(RpcController controller, - SlowLogResponseRequest request) throws ServiceException { - return null; - } - @Override public ClearSlowLogResponses clearSlowLogsResponses(RpcController controller, ClearSlowLogResponseRequest request) throws ServiceException { From 31d84e95fc743d3881db200a96adc36b65b1d0ec Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 28 Sep 2020 20:01:43 +0800 Subject: [PATCH 398/769] HBASE-25017 Migrate flaky reporting jenkins job from Hadoop to hbase (#2466) Signed-off-by: Guanghao Zhang --- dev-support/flaky-tests/flaky-reporting.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 8a56c0bdb0cc..640b1cb54a77 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { From f4de2f53dd122190517b48ecc36edfa1a0f9c00b Mon Sep 17 00:00:00 2001 From: XinSun Date: Tue, 29 Sep 2020 08:27:37 +0800 Subject: [PATCH 399/769] HBASE-25100 conf and conn are assigned twice in HBaseReplicationEndpoint and HBaseInterClusterReplicationEndpoint (#2463) Signed-off-by: Duo Zhang Signed-off-by: Guanghao Zhang --- .../replication/HBaseReplicationEndpoint.java | 61 +++++++++++++------ .../HBaseInterClusterReplicationEndpoint.java | 33 ---------- 2 files changed, 42 insertions(+), 52 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 850a79125562..b08c99098c5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -60,10 +60,11 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private static final Logger LOG = LoggerFactory.getLogger(HBaseReplicationEndpoint.class); private ZKWatcher zkw = null; + private final Object zkwLock = new Object(); protected Configuration conf; - protected AsyncClusterConnection conn; + private AsyncClusterConnection conn; /** * Default maximum number of times a replication sink can be reported as bad before @@ -103,10 +104,6 @@ protected AsyncClusterConnection 
createConnection(Configuration conf) throws IOE public void init(Context context) throws IOException { super.init(context); this.conf = HBaseConfiguration.create(ctx.getConfiguration()); - // TODO: This connection is replication specific or we should make it particular to - // replication and make replication specific settings such as compression or codec to use - // passing Cells. - this.conn = createConnection(this.conf); this.ratio = ctx.getConfiguration().getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); this.badSinkThreshold = @@ -114,9 +111,19 @@ public void init(Context context) throws IOException { this.badReportCounts = Maps.newHashMap(); } - protected synchronized void disconnect() { - if (zkw != null) { - zkw.close(); + protected void disconnect() { + synchronized (zkwLock) { + if (zkw != null) { + zkw.close(); + } + } + if (this.conn != null) { + try { + this.conn.close(); + this.conn = null; + } catch (IOException e) { + LOG.warn("{} Failed to close the connection", ctx.getPeerId()); + } } } @@ -128,11 +135,11 @@ private void reconnect(KeeperException ke) { if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException || ke instanceof AuthFailedException) { String clusterKey = ctx.getPeerConfig().getClusterKey(); - LOG.warn("Lost the ZooKeeper connection for peer " + clusterKey, ke); + LOG.warn("Lost the ZooKeeper connection for peer {}", clusterKey, ke); try { reloadZkWatcher(); } catch (IOException io) { - LOG.warn("Creation of ZookeeperWatcher failed for peer " + clusterKey, io); + LOG.warn("Creation of ZookeeperWatcher failed for peer {}", clusterKey, io); } } } @@ -151,6 +158,7 @@ public void stop() { protected void doStart() { try { reloadZkWatcher(); + connectPeerCluster(); notifyStarted(); } catch (IOException e) { notifyFailed(e); @@ -168,10 +176,12 @@ protected void doStop() { // limit connections when multiple replication sources try to connect to // the peer cluster. If the peer cluster is down we can get out of control // over time. 
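  // (Locking sketch for the rewrite below: instead of marking whole methods
  // synchronized on the endpoint itself, only the ZKWatcher reads/writes are
  // serialized on the dedicated zkwLock object, so a slow ZooKeeper
  // round-trip should no longer block unrelated callers of this endpoint.)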
- public synchronized UUID getPeerUUID() { + public UUID getPeerUUID() { UUID peerUUID = null; try { - peerUUID = ZKClusterId.getUUIDForCluster(zkw); + synchronized (zkwLock) { + peerUUID = ZKClusterId.getUUIDForCluster(zkw); + } } catch (KeeperException ke) { reconnect(ke); } @@ -182,13 +192,24 @@ public synchronized UUID getPeerUUID() { * Closes the current ZKW (if not null) and creates a new one * @throws IOException If anything goes wrong connecting */ - private synchronized void reloadZkWatcher() throws IOException { - if (zkw != null) { - zkw.close(); + private void reloadZkWatcher() throws IOException { + synchronized (zkwLock) { + if (zkw != null) { + zkw.close(); + } + zkw = new ZKWatcher(ctx.getConfiguration(), + "connection to cluster: " + ctx.getPeerId(), this); + zkw.registerListener(new PeerRegionServerListener(this)); + } + } + + private void connectPeerCluster() throws IOException { + try { + conn = createConnection(this.conf); + } catch (IOException ioe) { + LOG.warn("{} Failed to create connection for peer cluster", ctx.getPeerId(), ioe); + throw ioe; } - zkw = new ZKWatcher(ctx.getConfiguration(), - "connection to cluster: " + ctx.getPeerId(), this); - zkw.registerListener(new PeerRegionServerListener(this)); } @Override @@ -211,7 +232,9 @@ public boolean isAborted() { protected List fetchSlavesAddresses() { List children = null; try { - children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); + synchronized (zkwLock) { + children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); + } } catch (KeeperException ke) { if (LOG.isDebugEnabled()) { LOG.debug("Fetch slaves addresses failed", ke); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index b6e1f69173fe..b127b467505d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -44,14 +44,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; -import org.apache.hadoop.hbase.client.ClusterConnectionFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -59,7 +56,6 @@ import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Threads; @@ -127,7 +123,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi @Override public void init(Context context) throws IOException { super.init(context); - this.conf = 
HBaseConfiguration.create(ctx.getConfiguration()); decorateConf(); this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", @@ -139,10 +134,6 @@ public void init(Context context) throws IOException { DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); this.maxTerminationWait = maxTerminationWaitMultiplier * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - // TODO: This connection is replication specific or we should make it particular to - // replication and make replication specific settings such as compression or codec to use - // passing Cells. - this.conn = createConnection(this.conf); this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); @@ -412,19 +403,6 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt return entryList; } - private void reconnectToPeerCluster() { - AsyncClusterConnection connection = null; - try { - connection = - ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent()); - } catch (IOException ioe) { - LOG.warn("{} Failed to create connection for peer cluster", logPeerId(), ioe); - } - if (connection != null) { - this.conn = connection; - } - } - private long parallelReplicate(CompletionService pool, ReplicateContext replicateContext, List> batches) throws IOException { int futures = 0; @@ -504,9 +482,6 @@ public boolean replicate(ReplicateContext replicateContext) { } continue; } - if (this.conn == null) { - reconnectToPeerCluster(); - } try { // replicate the batches to sink side. parallelReplicate(pool, replicateContext, batches); @@ -564,14 +539,6 @@ protected boolean isPeerEnabled() { @Override protected void doStop() { disconnect(); // don't call super.doStop() - if (this.conn != null) { - try { - this.conn.close(); - this.conn = null; - } catch (IOException e) { - LOG.warn("{} Failed to close the connection", logPeerId()); - } - } // Allow currently running replication tasks to finish exec.shutdown(); try { From edc0a77e40d64ccc9860a36b037b0e36f2521729 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 29 Sep 2020 15:25:05 +0800 Subject: [PATCH 400/769] Revert "HBASE-25017 Migrate flaky reporting jenkins job from Hadoop to hbase (#2466)" This reverts commit fbef545989f0824fc948d723a885a4ce13a26b7b. 
--- dev-support/flaky-tests/flaky-reporting.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 640b1cb54a77..8a56c0bdb0cc 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'hbase' + label 'Hadoop' } } triggers { From 99e9249d9bbb6e7b116cf3d9f707685de5f91a8a Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 28 Sep 2020 20:01:43 +0800 Subject: [PATCH 401/769] HBASE-25107 Migrate flaky reporting jenkins job from Hadoop to hbase (#2466) Signed-off-by: Guanghao Zhang --- dev-support/flaky-tests/flaky-reporting.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 8a56c0bdb0cc..640b1cb54a77 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { From 65ded2e937e607c1b8676d945197226c097846d1 Mon Sep 17 00:00:00 2001 From: bsglz <18031031@qq.com> Date: Tue, 29 Sep 2020 15:36:03 +0800 Subject: [PATCH 402/769] =?UTF-8?q?HBASE-24967=20The=20table.jsp=20cost=20?= =?UTF-8?q?long=20time=20to=20load=20if=20the=20table=20include=E2=80=A6?= =?UTF-8?q?=20(#2326)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * HBASE-24967 The table.jsp cost long time to load if the table include closed regions * fix it by another way * fix review issue * fix checkstyle warnings * fix checkstyle warning --- .../apache/hadoop/hbase/RegionMetrics.java | 6 ++ .../hadoop/hbase/RegionMetricsBuilder.java | 25 +++++++- .../hbase/shaded/protobuf/ProtobufUtil.java | 17 +++++ .../main/protobuf/server/ClusterStatus.proto | 10 +++ .../apache/hadoop/hbase/master/HMaster.java | 54 ++++++++++++++-- .../hbase/regionserver/HRegionServer.java | 2 +- .../resources/hbase-webapps/master/table.jsp | 18 +----- .../master/TestRegionsRecoveryChore.java | 6 ++ .../regionserver/TestCompactionState.java | 63 +++++++++++++++---- 9 files changed, 166 insertions(+), 35 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 7d732607ae36..8cd3ea156c4d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase; import java.util.Map; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -182,4 +183,9 @@ default String getNameAsString() { * @return the block total weight of this region */ long getBlocksTotalWeight(); + + /** + * @return the compaction state of this region + */ + CompactionState getCompactionState(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java index c3839662ac27..8349c35d7d33 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -24,12 +24,14 
@@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -58,6 +60,8 @@ public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regio .setBlocksLocalWithSsdWeight(regionLoadPB.hasBlocksLocalWithSsdWeight() ? regionLoadPB.getBlocksLocalWithSsdWeight() : 0) .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) + .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad( + regionLoadPB.getCompactionState())) .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE)) @@ -159,6 +163,7 @@ public static RegionMetricsBuilder newBuilder(byte[] name) { private long blocksLocalWeight; private long blocksLocalWithSsdWeight; private long blocksTotalWeight; + private CompactionState compactionState; private RegionMetricsBuilder(byte[] name) { this.name = name; } @@ -263,6 +268,11 @@ public RegionMetricsBuilder setBlocksTotalWeight(long value) { this.blocksTotalWeight = value; return this; } + public RegionMetricsBuilder setCompactionState(CompactionState compactionState) { + this.compactionState = compactionState; + return this; + } + public RegionMetrics build() { return new RegionMetricsImpl(name, storeCount, @@ -289,7 +299,8 @@ public RegionMetrics build() { dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight, - blocksTotalWeight); + blocksTotalWeight, + compactionState); } private static class RegionMetricsImpl implements RegionMetrics { @@ -319,6 +330,7 @@ private static class RegionMetricsImpl implements RegionMetrics { private final long blocksLocalWeight; private final long blocksLocalWithSsdWeight; private final long blocksTotalWeight; + private final CompactionState compactionState; RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, @@ -344,7 +356,8 @@ private static class RegionMetricsImpl implements RegionMetrics { float dataLocalityForSsd, long blocksLocalWeight, long blocksLocalWithSsdWeight, - long blocksTotalWeight) { + long blocksTotalWeight, + CompactionState compactionState) { this.name = Preconditions.checkNotNull(name); this.storeCount = storeCount; this.storeFileCount = storeFileCount; @@ -371,6 +384,7 @@ private static class RegionMetricsImpl implements RegionMetrics { this.blocksLocalWeight = blocksLocalWeight; this.blocksLocalWithSsdWeight = blocksLocalWithSsdWeight; this.blocksTotalWeight = blocksTotalWeight; + this.compactionState = compactionState; } @Override @@ -503,6 +517,11 @@ public long getBlocksTotalWeight() { return blocksTotalWeight; } + @Override + public CompactionState getCompactionState() { + return compactionState; + } + @Override public String toString() { StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount", @@ -562,6 +581,8 @@ public String toString() { blocksLocalWithSsdWeight); Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight); + 
Strings.appendKeyValue(sb, "compactionState", + compactionState); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d2217c65dd03..d5fdb89302c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2960,6 +2960,23 @@ public static GetRegionInfoResponse.CompactionState createCompactionState(Compac return GetRegionInfoResponse.CompactionState.valueOf(state.toString()); } + /** + * Creates {@link CompactionState} from + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos + * .RegionLoad.CompactionState} state + * @param state the protobuf CompactionState + * @return CompactionState + */ + public static CompactionState createCompactionStateForRegionLoad( + RegionLoad.CompactionState state) { + return CompactionState.valueOf(state.toString()); + } + + public static RegionLoad.CompactionState createCompactionStateForRegionLoad( + CompactionState state) { + return RegionLoad.CompactionState.valueOf(state.toString()); + } + public static Optional toOptionalTimestamp(MajorCompactionTimestampResponse resp) { long timestamp = resp.getCompactionTimestamp(); return timestamp == 0 ? Optional.empty() : Optional.of(timestamp); diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto index 0c8e89d185d8..35f3c2d054b5 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto @@ -167,6 +167,16 @@ message RegionLoad { /** The current blocks total weight for region in the regionserver */ optional uint64 blocks_total_weight = 26; + + /** The compaction state for region */ + optional CompactionState compaction_state = 27; + + enum CompactionState { + NONE = 0; + MINOR = 1; + MAJOR = 2; + MAJOR_AND_MINOR = 3; + } } message UserLoad { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index e4bd3c5fce22..cf43c8b814c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -75,13 +75,16 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PleaseHoldException; +import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; @@ -237,7 +240,7 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** @@ -3445,12 +3448,12 @@ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws I * @param tableName The current table name. * @return If a given table is in mob file compaction now. */ - public CompactionState getMobCompactionState(TableName tableName) { + public GetRegionInfoResponse.CompactionState getMobCompactionState(TableName tableName) { AtomicInteger compactionsCount = mobCompactionStates.get(tableName); if (compactionsCount != null && compactionsCount.get() != 0) { - return CompactionState.MAJOR_AND_MINOR; + return GetRegionInfoResponse.CompactionState.MAJOR_AND_MINOR; } - return CompactionState.NONE; + return GetRegionInfoResponse.CompactionState.NONE; } public void reportMobCompactionStart(TableName tableName) throws IOException { @@ -3900,4 +3903,47 @@ public MetaRegionLocationCache getMetaRegionLocationCache() { public RSGroupInfoManager getRSGroupInfoManager() { return rsGroupInfoManager; } + + /** + * Get the compaction state of the table + * + * @param tableName The table name + * @return CompactionState Compaction state of the table + */ + public CompactionState getCompactionState(final TableName tableName) { + CompactionState compactionState = CompactionState.NONE; + try { + List regions = + assignmentManager.getRegionStates().getRegionsOfTable(tableName, false); + for (RegionInfo regionInfo : regions) { + ServerName serverName = + assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo); + if (serverName == null) { + continue; + } + ServerMetrics sl = serverManager.getLoad(serverName); + if (sl == null) { + continue; + } + RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName()); + if (regionMetrics.getCompactionState() == CompactionState.MAJOR) { + if (compactionState == CompactionState.MINOR) { + compactionState = CompactionState.MAJOR_AND_MINOR; + } else { + compactionState = CompactionState.MAJOR; + } + } else if (regionMetrics.getCompactionState() == CompactionState.MINOR) { + if (compactionState == CompactionState.MAJOR) { + compactionState = CompactionState.MAJOR_AND_MINOR; + } else { + compactionState = CompactionState.MINOR; + } + } + } + } catch (Exception e) { + compactionState = null; + LOG.error("Exception when get compaction state for " + tableName.getNameAsString(), e); + } + return compactionState; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index cd90fb87d9a2..d6eb45fe65e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1758,9 +1758,9 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, .setBlocksLocalWeight(blocksLocalWeight) .setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight) .setBlocksTotalWeight(blocksTotalWeight) + .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState())) .setLastMajorCompactionTs(r.getOldestHfileTs(true)); r.setCompleteSequenceId(regionLoadBldr); - return regionLoadBldr.build(); } diff --git 
a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index e46b2778546d..23eeb3ab740f 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -30,6 +30,7 @@ import="java.util.HashSet" import="java.util.Optional" import="java.util.TreeMap" + import="java.util.concurrent.TimeoutException" import="java.util.concurrent.TimeUnit" import="org.apache.commons.lang3.StringEscapeUtils" import="org.apache.hadoop.conf.Configuration" @@ -654,21 +655,8 @@ <% if (master.getAssignmentManager().isTableEnabled(table.getName())) { - try { - CompactionState compactionState = admin.getCompactionState(table.getName()).get(); - %><%= compactionState %><% - } catch (Exception e) { - - if(e.getCause() != null && e.getCause().getCause() instanceof NotServingRegionException) { - %><%= CompactionState.NONE %><% - } else { - // Nothing really to do here - for(StackTraceElement element : e.getStackTrace()) { - %><%= StringEscapeUtils.escapeHtml4(element.toString()) %><% - } - %> Unknown <% - } - } + CompactionState compactionState = master.getCompactionState(table.getName()); + %><%= compactionState==null?"UNKNOWN":compactionState %><% } else { %><%= CompactionState.NONE %><% } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java index 50361edd6d8d..2208f5a8107a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UserMetrics; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; @@ -512,6 +513,11 @@ public long getBlocksLocalWithSsdWeight() { public long getBlocksTotalWeight() { return 0; } + + @Override + public CompactionState getCompactionState() { + return null; + } }; return regionMetrics; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java index 599db542ff41..39171da02fae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -69,24 +70,50 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + enum StateSource { + ADMIN, MASTER + } + + @Test + public void testMajorCompactionStateFromAdmin() throws IOException, InterruptedException { + compaction(name.getMethodName(), 8, CompactionState.MAJOR, false, StateSource.ADMIN); + } + + @Test + public void 
testMinorCompactionStateFromAdmin() throws IOException, InterruptedException { + compaction(name.getMethodName(), 15, CompactionState.MINOR, false, StateSource.ADMIN); + } + + @Test + public void testMajorCompactionOnFamilyStateFromAdmin() throws IOException, InterruptedException { + compaction(name.getMethodName(), 8, CompactionState.MAJOR, true, StateSource.ADMIN); + } + @Test - public void testMajorCompaction() throws IOException, InterruptedException { - compaction(name.getMethodName(), 8, CompactionState.MAJOR, false); + public void testMinorCompactionOnFamilyStateFromAdmin() throws IOException, InterruptedException { + compaction(name.getMethodName(), 15, CompactionState.MINOR, true, StateSource.ADMIN); } @Test - public void testMinorCompaction() throws IOException, InterruptedException { - compaction(name.getMethodName(), 15, CompactionState.MINOR, false); + public void testMajorCompactionStateFromMaster() throws IOException, InterruptedException { + compaction(name.getMethodName(), 8, CompactionState.MAJOR, false, StateSource.MASTER); } @Test - public void testMajorCompactionOnFamily() throws IOException, InterruptedException { - compaction(name.getMethodName(), 8, CompactionState.MAJOR, true); + public void testMinorCompactionStateFromMaster() throws IOException, InterruptedException { + compaction(name.getMethodName(), 15, CompactionState.MINOR, false, StateSource.MASTER); } @Test - public void testMinorCompactionOnFamily() throws IOException, InterruptedException { - compaction(name.getMethodName(), 15, CompactionState.MINOR, true); + public void testMajorCompactionOnFamilyStateFromMaster() + throws IOException, InterruptedException { + compaction(name.getMethodName(), 8, CompactionState.MAJOR, true, StateSource.MASTER); + } + + @Test + public void testMinorCompactionOnFamilyStateFromMaster() + throws IOException, InterruptedException { + compaction(name.getMethodName(), 15, CompactionState.MINOR, true, StateSource.MASTER); } @Test @@ -127,11 +154,12 @@ public void testInvalidColumnFamily() throws IOException, InterruptedException { * @param flushes * @param expectedState * @param singleFamily otherwise, run compaction on all cfs + * @param stateSource get the state by Admin or Master * @throws IOException * @throws InterruptedException */ private void compaction(final String tableName, final int flushes, - final CompactionState expectedState, boolean singleFamily) + final CompactionState expectedState, boolean singleFamily, StateSource stateSource) throws IOException, InterruptedException { // Create a table with regions TableName table = TableName.valueOf(tableName); @@ -143,6 +171,7 @@ private void compaction(final String tableName, final int flushes, ht = TEST_UTIL.createTable(table, families); loadData(ht, families, 3000, flushes); HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); + HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); List regions = rs.getRegions(table); int countBefore = countStoreFilesInFamilies(regions, families); int countBeforeSingleFamily = countStoreFilesInFamily(regions, family); @@ -164,10 +193,10 @@ private void compaction(final String tableName, final int flushes, long curt = System.currentTimeMillis(); long waitTime = 5000; long endt = curt + waitTime; - CompactionState state = admin.getCompactionState(table); + CompactionState state = getCompactionState(stateSource, master, admin, table); while (state == CompactionState.NONE && curt < endt) { Thread.sleep(10); - state = admin.getCompactionState(table); + state = 
getCompactionState(stateSource, master, admin, table); curt = System.currentTimeMillis(); } // Now, should have the right compaction state, @@ -179,10 +208,10 @@ private void compaction(final String tableName, final int flushes, } } else { // Wait until the compaction is done - state = admin.getCompactionState(table); + state = getCompactionState(stateSource, master, admin, table); while (state != CompactionState.NONE && curt < endt) { Thread.sleep(10); - state = admin.getCompactionState(table); + state = getCompactionState(stateSource, master, admin, table); } // Now, compaction should be done. assertEquals(CompactionState.NONE, state); @@ -210,6 +239,14 @@ private void compaction(final String tableName, final int flushes, } } + private static CompactionState getCompactionState(StateSource stateSource, HMaster master, + Admin admin, TableName table) throws IOException { + CompactionState state = stateSource == StateSource.ADMIN ? + admin.getCompactionState(table) : + master.getCompactionState(table); + return state; + } + private static int countStoreFilesInFamily( List regions, final byte[] family) { return countStoreFilesInFamilies(regions, new byte[][]{family}); From d0a26081e10e4d58f3119d4b1f5df206e6fc1bca Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 29 Sep 2020 15:48:57 +0800 Subject: [PATCH 403/769] HBASE-25103 Remove ZNodePaths.metaReplicaZNodes (#2464) Signed-off-by: Huaxiang Sun --- .../hbase/client/ZKConnectionRegistry.java | 2 +- .../hadoop/hbase/zookeeper/ZNodePaths.java | 68 ++++++------------- .../hbase/master/MasterMetaBootstrap.java | 2 +- .../hbase/master/MetaRegionLocationCache.java | 6 +- .../hbase/master/zksyncer/ClientZKSyncer.java | 2 +- .../master/zksyncer/MetaLocationSyncer.java | 12 ++-- .../apache/hadoop/hbase/zookeeper/ZKUtil.java | 10 +-- 7 files changed, 38 insertions(+), 64 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 42a418859f18..f1f052138637 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -141,7 +141,7 @@ private void getMetaRegionLocation(CompletableFuture future, HRegionLocation[] locs = new HRegionLocation[metaReplicaZNodes.size()]; MutableInt remaining = new MutableInt(locs.length); for (String metaReplicaZNode : metaReplicaZNodes) { - int replicaId = znodePaths.getMetaReplicaIdFromZnode(metaReplicaZNode); + int replicaId = znodePaths.getMetaReplicaIdFromZNode(metaReplicaZNode); String path = ZNodePaths.joinZNode(znodePaths.baseZNode, metaReplicaZNode); if (replicaId == DEFAULT_REPLICA_ID) { addListener(getAndConvert(path, ZKConnectionRegistry::getMetaProto), (proto, error) -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index 5c49808807ff..a0065a9e9cbf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -17,22 +17,15 @@ */ package org.apache.hadoop.hbase.zookeeper; -import static org.apache.hadoop.hbase.HConstants.DEFAULT_META_REPLICA_NUM; import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; -import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; import static 
org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME; import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; -import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; -import java.util.Collection; -import java.util.Optional; -import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; - /** * Class that hold all the paths of znode for HBase. */ @@ -55,11 +48,6 @@ public class ZNodePaths { */ private final String metaZNodePrefix; - /** - * znodes containing the locations of the servers hosting the meta replicas - */ - private final ImmutableMap metaReplicaZNodes; - // znode containing ephemeral nodes of the regionservers public final String rsZNode; // znode containing ephemeral nodes of the draining regionservers @@ -104,14 +92,7 @@ public class ZNodePaths { public ZNodePaths(Configuration conf) { baseZNode = conf.get(ZOOKEEPER_ZNODE_PARENT, DEFAULT_ZOOKEEPER_ZNODE_PARENT); - ImmutableMap.Builder builder = ImmutableMap.builder(); metaZNodePrefix = conf.get(META_ZNODE_PREFIX_CONF_KEY, META_ZNODE_PREFIX); - String defaultMetaReplicaZNode = ZNodePaths.joinZNode(baseZNode, metaZNodePrefix); - builder.put(DEFAULT_REPLICA_ID, defaultMetaReplicaZNode); - int numMetaReplicas = conf.getInt(META_REPLICAS_NUM, DEFAULT_META_REPLICA_NUM); - IntStream.range(1, numMetaReplicas) - .forEachOrdered(i -> builder.put(i, defaultMetaReplicaZNode + "-" + i)); - metaReplicaZNodes = builder.build(); rsZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.rs", "rs")); drainingZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining")); masterAddressZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master")); @@ -142,7 +123,6 @@ public ZNodePaths(Configuration conf) { public String toString() { return new StringBuilder() .append("ZNodePaths [baseZNode=").append(baseZNode) - .append(", metaReplicaZNodes=").append(metaReplicaZNodes) .append(", rsZNode=").append(rsZNode) .append(", drainingZNode=").append(drainingZNode) .append(", masterAddressZNode=").append(masterAddressZNode) @@ -164,29 +144,15 @@ public String toString() { .append("]").toString(); } - /** - * @return true if the znode is a meta region replica - */ - public boolean isAnyMetaReplicaZNode(String node) { - return this.metaReplicaZNodes.containsValue(node); - } - - /** - * @return Meta Replica ZNodes - */ - public Collection getMetaReplicaZNodes() { - return this.metaReplicaZNodes.values(); - } - /** * @return the znode string corresponding to a replicaId */ public String getZNodeForReplica(int replicaId) { - // return a newly created path but don't update the cache of paths - // This is mostly needed for tests that attempt to create meta replicas - // from outside the master - return Optional.ofNullable(metaReplicaZNodes.get(replicaId)) - .orElseGet(() -> metaReplicaZNodes.get(DEFAULT_REPLICA_ID) + "-" + replicaId); + if (RegionReplicaUtil.isDefaultReplica(replicaId)) { + return joinZNode(baseZNode, metaZNodePrefix); + } else { + return joinZNode(baseZNode, metaZNodePrefix + "-" + replicaId); + } } /** @@ -198,7 +164,7 @@ public int getMetaReplicaIdFromPath(String path) { // Extract the znode from path. The prefix is of the following format. // baseZNode + PATH_SEPARATOR. 
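// For example, with the default base znode "/hbase" and meta prefix "meta", the default
// replica's node is "/hbase/meta" and replica 2's is "/hbase/meta-2"; stripping the
// "/hbase/" prefix leaves "meta" or "meta-2" for getMetaReplicaIdFromZNode below to parse.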
int prefixLen = baseZNode.length() + 1; - return getMetaReplicaIdFromZnode(path.substring(prefixLen)); + return getMetaReplicaIdFromZNode(path.substring(prefixLen)); } /** @@ -206,7 +172,7 @@ public int getMetaReplicaIdFromPath(String path) { * @param znode the name of the znode, does not include baseZNode * @return replicaId */ - public int getMetaReplicaIdFromZnode(String znode) { + public int getMetaReplicaIdFromZNode(String znode) { return znode.equals(metaZNodePrefix)? RegionInfo.DEFAULT_REPLICA_ID: Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); @@ -220,17 +186,25 @@ public boolean isMetaZNodePrefix(String znode) { } /** - * Returns whether the znode is supposed to be readable by the client and DOES NOT contain + * @return True if the fully qualified path is for meta location + */ + public boolean isMetaZNodePath(String path) { + int prefixLen = baseZNode.length() + 1; + return path.length() > prefixLen && isMetaZNodePrefix(path.substring(prefixLen)); + } + + /** + * Returns whether the path is supposed to be readable by the client and DOES NOT contain * sensitive information (world readable). */ - public boolean isClientReadable(String node) { + public boolean isClientReadable(String path) { // Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS // all clients need to access this data to work. Using zk for sharing data to clients (other // than service lookup case is not a recommended design pattern. - return node.equals(baseZNode) || isAnyMetaReplicaZNode(node) || - node.equals(masterAddressZNode) || node.equals(clusterIdZNode) || node.equals(rsZNode) || + return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) || + path.equals(clusterIdZNode) || path.equals(rsZNode) || // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not - node.equals(tableZNode) || node.startsWith(tableZNode + "/"); + path.equals(tableZNode) || path.startsWith(tableZNode + "/"); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java index c676df8b6c88..0b3476fc9dd5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java @@ -92,7 +92,7 @@ private void unassignExcessMetaReplica(int numMetaReplicasConfigured) { try { List metaReplicaZnodes = zooKeeper.getMetaReplicaNodes(); for (String metaReplicaZnode : metaReplicaZnodes) { - int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZnode(metaReplicaZnode); + int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaZnode); if (replicaId >= numMetaReplicasConfigured) { RegionState r = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId); LOG.info("Closing excess replica of meta region " + r.getRegion()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java index f4e91b56051d..07512d16fd60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java @@ -157,7 +157,7 @@ private HRegionLocation getMetaRegionLocation(int replicaId) } private void updateMetaLocation(String path, ZNodeOpType opType) { - if 
(!isValidMetaZNode(path)) { + if (!isValidMetaPath(path)) { return; } LOG.debug("Updating meta znode for path {}: {}", path, opType.name()); @@ -220,8 +220,8 @@ public Optional> getMetaRegionLocations() { * Helper to check if the given 'path' corresponds to a meta znode. This listener is only * interested in changes to meta znodes. */ - private boolean isValidMetaZNode(String path) { - return watcher.getZNodePaths().isAnyMetaReplicaZNode(path); + private boolean isValidMetaPath(String path) { + return watcher.getZNodePaths().isMetaZNodePath(path); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java index b1c70c569356..38dc11218687 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java @@ -207,7 +207,7 @@ public synchronized void nodeDeleted(String path) { /** * @return the znode(s) to watch */ - abstract Collection getNodesToWatch(); + abstract Collection getNodesToWatch() throws KeeperException; /** * Thread to synchronize znode data to client ZK cluster diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java index 98d73224ce9b..dca5cadf8adf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java @@ -19,10 +19,12 @@ package org.apache.hadoop.hbase.master.zksyncer; import java.util.Collection; - +import java.util.stream.Collectors; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.KeeperException; /** * Tracks the meta region locations on server ZK cluster and synchronize them to client ZK cluster @@ -36,11 +38,13 @@ public MetaLocationSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server s @Override boolean validate(String path) { - return watcher.getZNodePaths().isAnyMetaReplicaZNode(path); + return watcher.getZNodePaths().isMetaZNodePath(path); } @Override - Collection getNodesToWatch() { - return watcher.getZNodePaths().getMetaReplicaZNodes(); + Collection getNodesToWatch() throws KeeperException { + return watcher.getMetaReplicaNodes().stream() + .map(znode -> ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode)) + .collect(Collectors.toList()); } } \ No newline at end of file diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 19d11d0704fc..45732d2efddf 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -37,14 +37,11 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; -import 
org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.security.Superusers; @@ -78,6 +75,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; @@ -1861,9 +1859,7 @@ public static String dump(ZKWatcher zkw) { } sb.append("\nRegion server holding hbase:meta: " + MetaTableLocator.getMetaRegionLocation(zkw)); - Configuration conf = HBaseConfiguration.create(); - int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); + int numMetaReplicas = zkw.getMetaReplicaNodes().size(); for (int i = 1; i < numMetaReplicas; i++) { sb.append("\nRegion server holding hbase:meta, replicaId " + i + " " + MetaTableLocator.getMetaRegionLocation(zkw, i)); @@ -2109,7 +2105,7 @@ private static void logRetrievedMsg(final ZKWatcher zkw, " byte(s) of data from znode " + znode + (watcherSet? " and set watcher; ": "; data=") + (data == null? "null": data.length == 0? "empty": ( - zkw.getZNodePaths().isMetaZNodePrefix(znode)? + zkw.getZNodePaths().isMetaZNodePath(znode)? getServerNameOrEmptyString(data): znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)? getServerNameOrEmptyString(data): From 053fecf997f4d0ce6cd8b0ceb49ffecf4878f6cb Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Tue, 29 Sep 2020 10:00:57 +0100 Subject: [PATCH 404/769] HBASE-24877 addendum: additional checks to avoid one extra possible race control in the initialize loop (#2400) Signed-off-by: Duo Zhang Signed-off-by: Josh Elser --- .../regionserver/ReplicationSource.java | 58 +++++++------ .../regionserver/TestReplicationSource.java | 84 ++++++++++++------- 2 files changed, 88 insertions(+), 54 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index bf8127f93abb..82120736bd42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -39,7 +39,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Predicate; import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -128,7 +127,9 @@ public class ReplicationSource implements ReplicationSourceInterface { //so that it doesn't try submit another initialize thread. //NOTE: this should only be set to false at the end of initialize method, prior to return. private AtomicBoolean startupOngoing = new AtomicBoolean(false); - + //Flag that signals an uncaught error happened while starting up the source, + // and that a retry should be attempted + private AtomicBoolean retryStartup = new AtomicBoolean(false); /** * A filter (or a chain of filters) for WAL entries; filters out edits. 
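Taken together, startupOngoing and retryStartup implement the retry protocol this addendum is about: the first marks an initialize() attempt in flight, the second asks the failure handler to schedule another attempt. A minimal sketch of the pattern, with simplified names and none of the replication-specific setup (an illustration only, not the patch's code):

    import java.util.concurrent.atomic.AtomicBoolean;

    class RetryingStartup {
      private final AtomicBoolean ongoing = new AtomicBoolean(false);
      private final AtomicBoolean retry = new AtomicBoolean(false);
      private final boolean abortOnError;

      RetryingStartup(boolean abortOnError) { this.abortOnError = abortOnError; }

      void initialize() {
        // ... endpoint and reader setup would happen here and may throw ...
        ongoing.set(false); // cleared only when initialization completes normally
      }

      void startup() {
        ongoing.set(true);
        Thread init = new Thread(this::initialize);
        init.setUncaughtExceptionHandler((t, e) -> {
          // First attempt failed; keep retrying on this background thread unless
          // configured to abort, so the caller's thread is never blocked.
          retry.set(!abortOnError);
          do {
            if (retry.get()) {
              retry.set(false);
              ongoing.set(true);
              try {
                initialize();
              } catch (Throwable err) {
                retry.set(!abortOnError);
              }
            }
          } while ((ongoing.get() || retry.get()) && !abortOnError);
        });
        init.start();
      }
    }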
@@ -375,7 +376,7 @@ private void tryStartNewShipper(String walGroupId, PriorityBlockingQueue q LOG.debug("{} preempted start of worker walGroupId={}", logPeerId(), walGroupId); return value; } else { - LOG.debug("{} starting worker for walGroupId={}", logPeerId(), walGroupId); + LOG.debug("{} starting worker for walGroupId={}", logPeerId(), walGroupId); ReplicationSourceShipper worker = createNewShipper(walGroupId, queue); ReplicationSourceWALReader walReader = createNewWALReader(walGroupId, queue, worker.getStartPosition()); @@ -570,6 +571,7 @@ private void initialize() { if (sleepForRetries("Error starting ReplicationEndpoint", sleepMultiplier)) { sleepMultiplier++; } else { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new RuntimeException("Exhausted retries to start replication endpoint."); } } if (!this.isSourceActive()) { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new IllegalStateException("Source should be active."); } @@ -600,6 +603,7 @@ } if(!this.isSourceActive()) { + retryStartup.set(!this.abortOnError); this.startupOngoing.set(false); throw new IllegalStateException("Source should be active."); } @@ -618,28 +622,34 @@ @Override public void startup() { - if (this.sourceRunning) { - return; - } + // mark we are running now this.sourceRunning = true; - //Flag that signalizes uncaught error happening while starting up the source - // and a retry should be attempted - MutableBoolean retryStartup = new MutableBoolean(true); - do { - if(retryStartup.booleanValue()) { - retryStartup.setValue(false); - startupOngoing.set(true); - // mark we are running now - initThread = new Thread(this::initialize); - Threads.setDaemonThreadRunning(initThread, - Thread.currentThread().getName() + ".replicationSource," + this.queueId, - (t,e) -> { - sourceRunning = false; - uncaughtException(t, e, null, null); - retryStartup.setValue(!this.abortOnError); - }); - } - } while (this.startupOngoing.get() && !this.abortOnError); + startupOngoing.set(true); + initThread = new Thread(this::initialize); + Threads.setDaemonThreadRunning(initThread, + Thread.currentThread().getName() + ".replicationSource," + this.queueId, + (t,e) -> { + //if first initialization attempt failed, and abortOnError is false, we will + //keep looping in this thread until initialize eventually succeeds, + //while the region server's main startup thread can go on with its work. 
+ sourceRunning = false; + uncaughtException(t, e, null, null); + retryStartup.set(!this.abortOnError); + do { + if(retryStartup.get()) { + this.sourceRunning = true; + startupOngoing.set(true); + retryStartup.set(false); + try { + initialize(); + } catch(Throwable error){ + sourceRunning = false; + uncaughtException(t, error, null, null); + retryStartup.set(!this.abortOnError); + } + } + } while ((this.startupOngoing.get() || this.retryStartup.get()) && !this.abortOnError); + }); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index 8b8dcd8afa28..ded9e8f28e21 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -444,7 +444,7 @@ protected void doStop() { /** * Deadend Endpoint. Does nothing. */ - public static class FaultyReplicationEndpoint extends DoNothingReplicationEndpoint { + public static class FlakyReplicationEndpoint extends DoNothingReplicationEndpoint { static int count = 0; @@ -460,6 +460,17 @@ public synchronized UUID getPeerUUID() { } + public static class FaultyReplicationEndpoint extends DoNothingReplicationEndpoint { + + static int count = 0; + + @Override + public synchronized UUID getPeerUUID() { + throw new RuntimeException(); + } + + } + /** * Test HBASE-20497 * Moved here from TestReplicationSource because doesn't need cluster. @@ -488,22 +499,16 @@ public void testRecoveredReplicationSourceShipperGetPosition() throws Exception assertEquals(1001L, shipper.getStartPosition()); } - /** - * Test ReplicationSource retries startup once an uncaught exception happens - * during initialization and eplication.source.regionserver.abort is set to false. - */ - @Test - public void testAbortFalseOnError() throws IOException { - ReplicationSource rs = new ReplicationSource(); - Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + private RegionServerServices setupForAbortTests(ReplicationSource rs, Configuration conf, + String endpointName) throws IOException { conf.setInt("replication.source.maxretriesmultiplier", 1); - conf.setBoolean("replication.source.regionserver.abort", false); ReplicationPeer mockPeer = Mockito.mock(ReplicationPeer.class); Mockito.when(mockPeer.getConfiguration()).thenReturn(conf); Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L); ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class); + FaultyReplicationEndpoint.count = 0; Mockito.when(peerConfig.getReplicationEndpointImpl()). - thenReturn(FaultyReplicationEndpoint.class.getName()); + thenReturn(endpointName); Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig); ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class); Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong()); @@ -512,6 +517,20 @@ public void testAbortFalseOnError() throws IOException { TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1")); rs.init(conf, null, manager, null, mockPeer, rss, queueId, null, p -> OptionalLong.empty(), new MetricsSource(queueId)); + return rss; + } + + /** + * Test ReplicationSource retries startup once an uncaught exception happens + * during initialization and replication.source.regionserver.abort is set to false. 
+ */ + @Test + public void testAbortFalseOnError() throws IOException { + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.setBoolean("replication.source.regionserver.abort", false); + ReplicationSource rs = new ReplicationSource(); + RegionServerServices rss = setupForAbortTests(rs, conf, + FlakyReplicationEndpoint.class.getName()); try { rs.startup(); assertTrue(rs.isSourceActive()); @@ -526,34 +545,39 @@ public void testAbortFalseOnError() throws IOException { } } + /** + * Test ReplicationSource keeps retrying startup indefinitely without blocking the main thread, + * when replication.source.regionserver.abort is set to false. + */ + @Test + public void testAbortFalseOnErrorDoesntBlockMainThread() throws IOException { + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + ReplicationSource rs = new ReplicationSource(); + RegionServerServices rss = setupForAbortTests(rs, conf, + FaultyReplicationEndpoint.class.getName()); + try { + rs.startup(); + assertTrue(true); + } finally { + rs.terminate("Done"); + rss.stop("Done"); + } + } + /** * Test ReplicationSource retries startup once an uncaught exception happens - * during initialization and replication.source.regionserver.abort is set to false. + * during initialization and replication.source.regionserver.abort is set to true. */ @Test public void testAbortTrueOnError() throws IOException { - ReplicationSource rs = new ReplicationSource(); Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - conf.setInt("replication.source.maxretriesmultiplier", 1); - ReplicationPeer mockPeer = Mockito.mock(ReplicationPeer.class); - Mockito.when(mockPeer.getConfiguration()).thenReturn(conf); - Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L); - ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class); - Mockito.when(peerConfig.getReplicationEndpointImpl()). - thenReturn(FaultyReplicationEndpoint.class.getName()); - Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig); - ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class); - Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong()); - String queueId = "qid"; - RegionServerServices rss = - TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1")); - rs.init(conf, null, manager, null, mockPeer, rss, queueId, null, - p -> OptionalLong.empty(), new MetricsSource(queueId)); + ReplicationSource rs = new ReplicationSource(); + RegionServerServices rss = setupForAbortTests(rs, conf, + FlakyReplicationEndpoint.class.getName()); try { rs.startup(); - Waiter.waitFor(conf, 1000, () -> FaultyReplicationEndpoint.count > 0); + Waiter.waitFor(conf, 1000, () -> rss.isAborted()); assertFalse(rs.isSourceActive()); - assertTrue(rss.isAborted()); } finally { rs.terminate("Done"); rss.stop("Done"); From 0745fd4a2af45bf28a441e644efaaf17bce68756 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 30 Sep 2020 09:34:15 -0700 Subject: [PATCH 405/769] HBASE-25109 Add MR Counters to WALPlayer; currently hard to tell if it is doing anything (#2468) Add MR counters so operator can see if WALPlayer run actually did anything. Fix bugs in usage (it enforced two args though usage describes allowing one arg only). Clean up usage output. In particular add note on wal file separator as hbase by default uses the ',' in its WAL file names which could befuddle operator trying to do simple import. 
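As a rough sketch (not part of this patch) of how a driver could read the new counters back once the job completes, where the counter group name assumes Hadoop's usual enum-counter naming convention:

    static void reportWALPlayerCounters(org.apache.hadoop.mapreduce.Job job)
        throws java.io.IOException {
      // Enum-based counters are grouped under the enum's class name (an assumption here).
      String group = "org.apache.hadoop.hbase.mapreduce.WALPlayer$Counter";
      org.apache.hadoop.mapreduce.Counters counters = job.getCounters();
      long edits = counters.findCounter(group, "WALEDITS").getValue();
      long puts = counters.findCounter(group, "PUTS").getValue();
      long deletes = counters.findCounter(group, "DELETES").getValue();
      System.out.println("WALPlayer read " + edits + " WAL edits, emitted "
          + puts + " puts and " + deletes + " deletes");
    }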
Signed-off-by: Huaxiang Sun --- .../apache/hadoop/hbase/mapreduce/Driver.java | 3 +- .../hbase/mapreduce/WALInputFormat.java | 5 +- .../hadoop/hbase/mapreduce/WALPlayer.java | 72 ++++++++++++------- .../hadoop/hbase/mapreduce/TestWALPlayer.java | 4 +- src/main/asciidoc/_chapters/ops_mgt.adoc | 44 ++++++------ 5 files changed, 75 insertions(+), 53 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index d52a31067f42..ed31c8422e7e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -34,6 +34,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { + private Driver() {} public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 1815412721f4..7c4be83a73e9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -154,14 +154,13 @@ public void initialize(InputSplit split, TaskAttemptContext context) WALSplit hsplit = (WALSplit)split; logFile = new Path(hsplit.getLogFileName()); conf = context.getConfiguration(); - LOG.info("Opening reader for "+split); + LOG.info("Opening {} for {}", logFile, split); openReader(logFile); this.startTime = hsplit.getStartTime(); this.endTime = hsplit.getEndTime(); } - private void openReader(Path path) throws IOException - { + private void openReader(Path path) throws IOException { closeReader(); reader = AbstractFSWALProvider.openReader(path, conf); seek(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index bbaa7549fa9a..5b1aac654414 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -58,6 +58,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; + + /** * A tool to replay WAL files as a M/R job. * The WAL can be replayed for a set of tables or all tables, @@ -140,7 +142,22 @@ public void setup(Context context) throws IOException { } /** - * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance. + * Enum for map metrics. Keep it out here rather than inside the Map + * inner-class so we can find associated properties. + */ + protected static enum Counter { + /** Number of aggregated writes */ + PUTS, + /** Number of aggregated deletes */ + DELETES, + CELLS_READ, + CELLS_WRITTEN, + WALEDITS + } + + /** + * A mapper that writes out {@link Mutation} to be directly applied to + * a running HBase instance. 
*/ protected static class WALMapper extends Mapper { @@ -148,6 +165,7 @@ protected static class WALMapper @Override public void map(WALKey key, WALEdit value, Context context) throws IOException { + context.getCounter(Counter.WALEDITS).increment(1); try { if (tables.isEmpty() || tables.containsKey(key.getTableName())) { TableName targetTable = @@ -157,6 +175,7 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { Delete del = null; Cell lastCell = null; for (Cell cell : value.getCells()) { + context.getCounter(Counter.CELLS_READ).increment(1); // Filtering WAL meta marker entries. if (WALEdit.isMetaEditFamily(cell)) { continue; @@ -172,9 +191,11 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { // row or type changed, write out aggregate KVs. if (put != null) { context.write(tableOut, put); + context.getCounter(Counter.PUTS).increment(1); } if (del != null) { context.write(tableOut, del); + context.getCounter(Counter.DELETES).increment(1); } if (CellUtil.isDelete(cell)) { del = new Delete(CellUtil.cloneRow(cell)); @@ -187,14 +208,17 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { } else { put.add(cell); } + context.getCounter(Counter.CELLS_WRITTEN).increment(1); } lastCell = cell; } // write residual KVs if (put != null) { context.write(tableOut, put); + context.getCounter(Counter.PUTS).increment(1); } if (del != null) { + context.getCounter(Counter.DELETES).increment(1); context.write(tableOut, del); } } @@ -270,7 +294,7 @@ public Job createSubmittableJob(String[] args) throws IOException { setupTime(conf, WALInputFormat.START_TIME_KEY); setupTime(conf, WALInputFormat.END_TIME_KEY); String inputDirs = args[0]; - String[] tables = args[1].split(","); + String[] tables = args.length == 1? 
new String [] {}: args[1].split(","); String[] tableMap; if (args.length > 2) { tableMap = args[2].split(","); if (tableMap.length != tables.length) { throw new IOException("The same number of tables and mapping must be provided."); } } else { - // if not mapping is specified map each table to itself + // if no mapping is specified, map each table to itself tableMap = tables; } conf.setStrings(TABLES_KEY, tables); @@ -349,27 +373,27 @@ private void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: " + NAME + " [options] <wal inputdir> [<tables>]"); - System.err.println("Replay all WAL files into HBase."); - System.err.println(" <tables> is a comma separated list of tables."); - System.err.println("If no tables (\"\") are specified, all tables are imported."); - System.err.println("(Be careful, hbase:meta entries will be imported in this case.)\n"); - System.err.println("WAL entries can be mapped to new set of tables via <tableMappings>."); - System.err.println(" <tableMappings> is a comma separated list of target tables."); - System.err.println("If specified, each table in <tables> must have a mapping.\n"); - System.err.println("By default " + NAME + " will load data directly into HBase."); - System.err.println("To generate HFiles for a bulk data load instead, pass the following option:"); - System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); - System.err.println(" (Only one table can be specified, and no mapping is allowed!)"); - System.err.println("Time range options:"); - System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); - System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); - System.err.println(" (The start and the end date of timerange. The dates can be expressed"); - System.err.println(" in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format."); - System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12)"); + System.err.println("Usage: " + NAME + " [options] <WAL inputdir> [<tables> <tableMappings>]"); + System.err.println(" <WAL inputdir> directory of WALs to replay."); + System.err.println(" <tables> comma separated list of tables. If no tables specified,"); + System.err.println(" all are imported (even hbase:meta if present)."); + System.err.println(" <tableMappings> WAL entries can be mapped to a new set of tables by passing"); + System.err.println(" <tableMappings>, a comma separated list of target tables."); + System.err.println(" If specified, each table in <tables> must have a mapping."); + System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); + System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); + System.err.println(" Only one table can be specified, and no mapping allowed!"); + System.err.println("To specify a time range, pass:"); + System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); + System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); + System.err.println(" The start and the end date of timerange. The dates can be expressed"); + System.err.println(" in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format."); + System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); - System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); - System.err.println(" Use the specified mapreduce job name for the wal player"); + System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); + System.err.println(" Use the specified mapreduce job name for the wal player"); + System.err.println(" -Dwal.input.separator=' '"); + System.err.println(" Change WAL filename separator (WAL dir names use default ','.)"); System.err.println("For performance also consider the following options:\n" + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); @@ -387,7 +411,7 @@ public static void main(String[] args) throws Exception { @Override public int run(String[] args) throws Exception { - if (args.length < 2) { + if (args.length < 1) { usage("Wrong number of arguments: " + args.length); System.exit(-1); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 4880ab64e669..432aff1dd044 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -223,8 +223,8 @@ public void testMainMethod() throws Exception { } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of arguments:")); - assertTrue(data.toString().contains("Usage: WALPlayer [options] " + "<wal inputdir> [<tables>]")); + assertTrue(data.toString().contains("Usage: WALPlayer [options] " + "<WAL inputdir> [<tables> <tableMappings>]")); assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output")); } diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index d1bc2cf8e0cb..6ea23655d3c7 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -914,7 +914,7 @@ see <<_wal_tools>>. Invoke via: ---- -$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <wal inputdir> [<tables>]> +$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <WAL inputdir> [<tables> <tableMappings>]> ---- For example: @@ -932,29 +932,27 @@ To NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in Running `WALPlayer` with no arguments prints brief usage information: ---- -Usage: WALPlayer [options] <wal inputdir> [<tables>] -Replay all WAL files into HBase. - <tables> is a comma separated list of tables. -If no tables ("") are specified, all tables are imported. -(Be careful, hbase:meta entries will be imported in this case.) - -WAL entries can be mapped to new set of tables via <tableMappings>. - <tableMappings> is a comma separated list of target tables. -If specified, each table in <tables> must have a mapping. - -By default WALPlayer will load data directly into HBase. -To generate HFiles for a bulk data load instead, pass the following option: - -Dwal.bulk.output=/path/for/output - (Only one table can be specified, and no mapping is allowed!) -Time range options: - -Dwal.start.time=[date|ms] - -Dwal.end.time=[date|ms] - (The start and the end date of timerange. The dates can be expressed - in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format. - E.g. 1234567890120 or 2009-02-13T23:32:30.12) +Usage: WALPlayer [options] <WAL inputdir> [<tables> <tableMappings>] + <WAL inputdir> directory of WALs to replay. + <tables> comma separated list of tables. If no tables specified, + all are imported (even hbase:meta if present). 
+ <tableMappings> WAL entries can be mapped to a new set of tables by passing + <tableMappings>, a comma separated list of target tables. + If specified, each table in <tables> must have a mapping. +To generate HFiles to bulk load instead of loading HBase directly, pass: + -Dwal.bulk.output=/path/for/output + Only one table can be specified, and no mapping allowed! +To specify a time range, pass: + -Dwal.start.time=[date|ms] + -Dwal.end.time=[date|ms] + The start and the end date of timerange. The dates can be expressed + in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format. + E.g. 1234567890120 or 2009-02-13T23:32:30.12 Other options: - -Dmapreduce.job.name=jobName - Use the specified mapreduce job name for the wal player + -Dmapreduce.job.name=jobName + Use the specified mapreduce job name for the wal player + -Dwal.input.separator=' ' + Change WAL filename separator (WAL dir names use default ','.) For performance also consider the following options: -Dmapreduce.map.speculative=false -Dmapreduce.reduce.speculative=false From a7ddde100b0f624e50753ca504d783b696750fa9 Mon Sep 17 00:00:00 2001 From: bsglz <18031031@qq.com> Date: Thu, 1 Oct 2020 03:08:34 +0800 Subject: [PATCH 406/769] HBASE-25062 The link of "Re:(HBASE-451) Remove HTableDescriptor from HRegionInfo" invalid (#2455) Signed-off-by: Jan Hentschel Signed-off-by: Duo Zhang Signed-off-by: stack --- src/main/asciidoc/_chapters/developer.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index 3e2e3938f459..a6939920cb45 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -2755,7 +2755,7 @@ However any substantive discussion (as with any off-list project-related discuss ==== Do not edit JIRA comments -Misspellings and/or bad grammar is preferable to the disruption a JIRA comment edit causes: See the discussion at link:http://search-hadoop.com/?q=%5BReopened%5D+%28HBASE-451%29+Remove+HTableDescriptor+from+HRegionInfo&fc_project=HBase[Re:(HBASE-451) Remove HTableDescriptor from HRegionInfo] +Misspellings and/or bad grammar is preferable to the disruption a JIRA comment edit causes. 
[[thirdparty]] === The hbase-thirdparty dependency and shading/relocation From 91a581f3b2868778fc00b27d1cb78310c592c13c Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 1 Oct 2020 14:30:27 +0800 Subject: [PATCH 407/769] HBASE-25132 Migrate flaky test jenkins job from Hadoop to hbase (#2485) --- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index e043feeb342d..282b83115883 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { From bb8f719fdbd2dca71361be74c0d156fd56788d7f Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 1 Oct 2020 14:32:49 +0800 Subject: [PATCH 408/769] HBASE-25133 Migrate HBase Nightly jenkins job from Hadoop to hbase (#2487) --- dev-support/Jenkinsfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 5aaefd80ff07..c250dcefe604 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -17,7 +17,7 @@ pipeline { agent { node { - label 'Hadoop' + label 'hbase' } } triggers { @@ -192,7 +192,7 @@ pipeline { stage ('yetus general check') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { @@ -257,7 +257,7 @@ pipeline { stage ('yetus jdk7 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -338,7 +338,7 @@ pipeline { stage ('yetus jdk8 hadoop2 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -419,7 +419,7 @@ pipeline { stage ('yetus jdk8 hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { @@ -504,7 +504,7 @@ pipeline { stage ('yetus jdk11 hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } when { From 136884f8f631bf49bda920a6e98ea0a8e08d5d06 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 1 Oct 2020 14:33:02 +0800 Subject: [PATCH 409/769] HBASE-25134 Migrate HBase PreCommit jenkins job from Hadoop to hbase (#2488) --- dev-support/Jenkinsfile_GitHub | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index d314ba45cd9c..801c2771fb95 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -18,7 +18,7 @@ pipeline { agent { - label 'Hadoop' + label 'hbase' } options { @@ -66,7 +66,7 @@ pipeline { stage ('yetus general check') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { @@ -152,7 +152,7 @@ pipeline { stage ('yetus jdk8 Hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { @@ -252,7 +252,7 @@ pipeline { stage ('yetus jdk11 hadoop3 checks') { agent { node { - label 'Hadoop' + label 'hbase' } } environment { From ad9946684ea2f938f53c140022073d89a27d742b Mon Sep 17 00:00:00 2001 From: Sanjeet Nishad Date: Thu, 1 Oct 2020 13:24:31 +0530 Subject: [PATCH 410/769] HBASE-24981 Enable table replication fails from 1.x to 2.x if table already exist at peer Closes #2353 Signed-off-by: stack Signed-off-by: Viraj Jasani Signed-off-by: Pankaj Kumar --- .../client/ColumnFamilyDescriptorBuilder.java | 5 --- .../TestColumnFamilyDescriptorBuilder.java | 31 +++++++++++++++++++ 2 files changed, 31 insertions(+), 5 deletions(-) diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 3889d32dda54..9a47cb52fa95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -294,11 +294,6 @@ public static Map getDefaultValues() { DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); - DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE)); - DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE)); - DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN)); // Do NOT add this key/value by default. NEW_VERSION_BEHAVIOR is NOT defined in hbase1 so // it is not possible to make an hbase1 HCD the same as an hbase2 HCD and so the replication // compare of schemas will fail. It is OK not adding the below to the initial map because of diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index d6ea1b3cef8f..557d2f8dfb6e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -39,6 +39,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import java.util.Map; @Category({ MiscTests.class, SmallTests.class }) public class TestColumnFamilyDescriptorBuilder { @@ -181,4 +182,34 @@ public void testSetTimeToLive() throws HBaseException { builder.setTimeToLive(ttl); Assert.assertEquals(43282800, builder.build().getTimeToLive()); } + + /** + * Test for verifying the ColumnFamilyDescriptorBuilder's default values so that backward + * compatibility with hbase-1.x can be maintained (see HBASE-24981). 
+ */ + @Test + public void testDefaultBuilder() { + final Map defaultValueMap = ColumnFamilyDescriptorBuilder.getDefaultValues(); + assertEquals(defaultValueMap.size(), 11); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOOMFILTER), + BloomType.ROW.toString()); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE), "0"); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.MAX_VERSIONS), "1"); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.MIN_VERSIONS), "0"); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.COMPRESSION), + Compression.Algorithm.NONE.toString()); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.TTL), + Integer.toString(Integer.MAX_VALUE)); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOCKSIZE), + Integer.toString(64 * 1024)); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.IN_MEMORY), + Boolean.toString(false)); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.BLOCKCACHE), + Boolean.toString(true)); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS), + KeepDeletedCells.FALSE.toString()); + assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING), + DataBlockEncoding.NONE.toString()); + + } } From f5a776149996469a36ac254affc8cbcdb619f9a8 Mon Sep 17 00:00:00 2001 From: ramkrish86 Date: Thu, 1 Oct 2020 18:07:28 +0530 Subject: [PATCH 411/769] HBASE-25135 Convert the internal seperator while emitting the memstore read metrics to # (#2486) Signed-off-by: Anoop Sam John --- .../hadoop/hbase/regionserver/MetricsTableSourceImpl.java | 2 +- .../hbase/regionserver/MetricsTableWrapperAggregate.java | 2 +- .../hadoop/hbase/regionserver/MetricsTableWrapperStub.java | 4 ++-- .../hbase/regionserver/MetricsTableWrapperAggregateImpl.java | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java index b39e1444dd2f..85f5bded98a8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java @@ -337,7 +337,7 @@ private void addGauge(MetricsRecordBuilder mrb, Map metricMap, Str for (Entry entry : metricMap.entrySet()) { // append 'store' and its name to the metric mrb.addGauge(Interns.info(this.tableNamePrefixPart1 + _COLUMNFAMILY - + entry.getKey().split(MetricsTableWrapperAggregate.UNDERSCORE)[1] + + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] + this.tableNamePrefixPart2 + metricName, metricDesc), entry.getValue()); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java index 4b8c46af2c0f..40fd6d8effaf 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java @@ -28,7 +28,7 @@ */ @InterfaceAudience.Private public interface MetricsTableWrapperAggregate { - public String UNDERSCORE = "_"; + public String HASH = "#"; /** * Get the number of read requests that have 
been issued against this table */ diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index 9a860a041d45..dbdc92da8ac4 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -116,14 +116,14 @@ public long getCpRequestsCount(String table) { @Override public Map getMemstoreOnlyRowReadsCount(String table) { Map map = new HashMap(); - map.put("table_info", 3L); + map.put("table#info", 3L); return map; } @Override public Map getMixedRowReadsCount(String table) { Map map = new HashMap(); - map.put("table_info", 3L); + map.put("table#info", 3L); return map; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java index 7b5c6ef9701d..77130b8da4cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java @@ -94,7 +94,7 @@ public void run() { (long) store.getAvgStoreFileAge().getAsDouble() * store.getStorefilesCount(); } mt.storeCount += 1; - tempKey = tbl.getNameAsString() + UNDERSCORE + familyName; + tempKey = tbl.getNameAsString() + HASH + familyName; Long tempVal = mt.perStoreMemstoreOnlyReadCount.get(tempKey); if (tempVal == null) { tempVal = 0L; From aa96acb0cf0d4893300a61b8c6c0138b4d099334 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 1 Oct 2020 22:15:15 +0800 Subject: [PATCH 412/769] Revert "HBASE-25134 Migrate HBase PreCommit jenkins job from Hadoop to hbase (#2488)" This reverts commit 5351aca8a1e01df4aefb0cfc1c0e1892dcd56caa. --- dev-support/Jenkinsfile_GitHub | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index 801c2771fb95..d314ba45cd9c 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -18,7 +18,7 @@ pipeline { agent { - label 'hbase' + label 'Hadoop' } options { @@ -66,7 +66,7 @@ pipeline { stage ('yetus general check') { agent { node { - label 'hbase' + label 'Hadoop' } } environment { @@ -152,7 +152,7 @@ pipeline { stage ('yetus jdk8 Hadoop3 checks') { agent { node { - label 'hbase' + label 'Hadoop' } } environment { @@ -252,7 +252,7 @@ pipeline { stage ('yetus jdk11 hadoop3 checks') { agent { node { - label 'hbase' + label 'Hadoop' } } environment { From d45f37c30f0f77a60676798a61794a3265b0cfc4 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Thu, 1 Oct 2020 10:04:58 -0700 Subject: [PATCH 413/769] HBASE-25091 Move LogComparator from ReplicationSource to AbstractFSWALProvider#.WALsStartTimeComparator (#2449) Give the comparator a more descriptive name, a better location, and make it work even when passed hbase:meta WAL files. 
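To make the resulting order concrete, a small usage sketch (WAL names modeled on the new TestWALProvider added below; plain and '.meta'-suffixed files sort together by the start timestamp embedded in the name):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

    List<Path> wals = new ArrayList<>(Arrays.asList(
        new Path("localhost%2C59908%2C1600304600425.1600304604321"),
        new Path("localhost%2C59908%2C1600304600425.meta.1600304604320.meta"),
        new Path("localhost%2C59908%2C1600304600425.meta.1600304604319.meta")));
    wals.sort(new AbstractFSWALProvider.WALStartTimeComparator());
    // Now ordered ...604319.meta, ...604320.meta, ...604321: oldest start time first.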
Signed-off-by: Duo Zhang Signed-off-by: Guanghao Zhang --- .../RecoveredReplicationSource.java | 4 +- .../regionserver/ReplicationSource.java | 29 +-------- .../hbase/wal/AbstractFSWALProvider.java | 36 +++++++++-- .../hadoop/hbase/wal/TestWALProvider.java | 62 +++++++++++++++++++ 4 files changed, 98 insertions(+), 33 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index 00aa026093fa..46cf851b9723 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -64,8 +64,8 @@ protected RecoveredReplicationSourceShipper createNewShipper(String walGroupId, public void locateRecoveredPaths(PriorityBlockingQueue queue) throws IOException { boolean hasPathChanged = false; - PriorityBlockingQueue newPaths = - new PriorityBlockingQueue(queueSizePerGroup, new LogsComparator()); + PriorityBlockingQueue newPaths = new PriorityBlockingQueue(queueSizePerGroup, + new AbstractFSWALProvider.WALStartTimeComparator()); pathsLoop: for (Path path : queue) { if (fs.exists(path)) { // still in same location, don't need to do anything newPaths.add(path); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 82120736bd42..4b034f56a93c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -253,7 +252,8 @@ public void enqueueLog(Path wal) { String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal.getName()); PriorityBlockingQueue queue = queues.get(walPrefix); if (queue == null) { - queue = new PriorityBlockingQueue<>(queueSizePerGroup, new LogsComparator()); + queue = new PriorityBlockingQueue<>(queueSizePerGroup, + new AbstractFSWALProvider.WALStartTimeComparator()); // make sure that we do not use an empty queue when setting up a ReplicationSource, otherwise // the shipper may quit immediately queue.put(wal); @@ -759,31 +759,6 @@ public boolean isSourceActive() { return !this.server.isStopped() && this.sourceRunning; } - /** - * Comparator used to compare logs together based on their start time - */ - public static class LogsComparator implements Comparator { - - @Override - public int compare(Path o1, Path o2) { - return Long.compare(getTS(o1), getTS(o2)); - } - - /** - *
- * Split a path to get the start time
- * For example: 10.20.20.171%3A60020.1277499063250
    - * @param p path to split - * @return start time - */ - private static long getTS(Path p) { - return AbstractFSWALProvider.getWALStartTimeFromWALName(p.getName()); - } - } - public ReplicationQueueInfo getReplicationQueueInfo() { return replicationQueueInfo; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 94ae70467793..6f9c87b00518 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hbase.wal; +import static org.apache.commons.lang3.StringUtils.isNumeric; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; @@ -418,6 +420,36 @@ public static boolean isMetaFile(String p) { return p != null && p.endsWith(META_WAL_PROVIDER_ID); } + /** + * Comparator used to compare WAL files together based on their start time. + * Just compares start times and nothing else. + */ + public static class WALStartTimeComparator implements Comparator { + @Override + public int compare(Path o1, Path o2) { + return Long.compare(getTS(o1), getTS(o2)); + } + + /** + * Split a path to get the start time + * For example: 10.20.20.171%3A60020.1277499063250 + * Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL + * which adds a '.syncrep' suffix. Check. + * @param p path to split + * @return start time + */ + private static long getTS(Path p) { + String name = p.getName(); + String [] splits = name.split("\\."); + String ts = splits[splits.length - 1]; + if (!isNumeric(ts)) { + // Its a '.meta' or a '.syncrep' suffix. + ts = splits[splits.length - 2]; + } + return Long.parseLong(ts); + } + } + public static boolean isArchivedLogFile(Path p) { String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; return p.toString().contains(oldLog); @@ -545,8 +577,4 @@ private static String getWALNameGroupFromWALName(String name, int group) { public static String getWALPrefixFromWALName(String name) { return getWALNameGroupFromWALName(name, 1); } - - public static long getWALStartTimeFromWALName(String name) { - return Long.parseLong(getWALNameGroupFromWALName(name, 2)); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java new file mode 100644 index 000000000000..bc06147d7cca --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.wal; + +import static org.junit.Assert.assertTrue; +import java.io.IOException; +import java.util.Comparator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RegionServerTests.class, SmallTests.class}) +public class TestWALProvider { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWALProvider.class); + + /** + * Test start time comparator. + */ + @Test + public void testWALStartTimeComparator() throws IOException { + Path metaPath1 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604319.meta"); + Path metaPath2 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604320.meta"); + Path path3 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.1600304604321"); + Path metaPath4 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604321.meta"); + Comparator c = new AbstractFSWALProvider.WALStartTimeComparator(); + assertTrue(c.compare(metaPath1, metaPath1) == 0); + assertTrue(c.compare(metaPath2, metaPath2) == 0); + assertTrue(c.compare(metaPath1, metaPath2) < 0); + assertTrue(c.compare(metaPath2, metaPath1) > 0); + assertTrue(c.compare(metaPath2, path3) < 0); + assertTrue(c.compare(path3, metaPath4) == 0); + } +} From 4d617c6d9eb4d274826782d36312182e2666dc52 Mon Sep 17 00:00:00 2001 From: SteNicholas Date: Fri, 2 Oct 2020 21:27:01 +0800 Subject: [PATCH 414/769] HBASE-25120 Remove the deprecated annotation for MetaTableAccessor.getScanForTableName (#2493) Signed-off-by: Duo Zhang --- .../main/java/org/apache/hadoop/hbase/MetaTableAccessor.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index c7f7ec197a9e..b9ec944ee9e3 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -319,9 +319,7 @@ public static List getTableRegions(Connection connection, TableName * and scan until it hits a new table since that requires parsing the HRI to get the table name. * @param tableName bytes of table's name * @return configured Scan object - * @deprecated This is internal so please remove it when we get a chance. 
*/ - @Deprecated public static Scan getScanForTableName(Connection connection, TableName tableName) { // Start key is just the table name with delimiters byte[] startKey = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION); From 20fa11c9371d6ca0cb30fb63f783eb6b0503f253 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 3 Oct 2020 20:48:38 +0800 Subject: [PATCH 415/769] HBASE-25080 Should not use AssignmentManager to test whether a table is enabled or not (#2436) Signed-off-by: Guanghao Zhang Signed-off-by: stack Signed-off-by: Viraj Jasani --- .../master/assignment/AssignmentManager.java | 6 +++--- .../AbstractStateMachineTableProcedure.java | 5 +++++ .../master/procedure/ModifyTableProcedure.java | 2 +- .../procedure/ReopenTableRegionsProcedure.java | 3 +-- .../main/resources/hbase-webapps/master/table.jsp | 5 +++-- .../master/assignment/MockMasterServices.java | 15 ++++----------- 6 files changed, 17 insertions(+), 19 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 5638af5af48f..d2e1eb3e9d42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -386,15 +386,15 @@ public List getFavoredNodes(final RegionInfo regionInfo) { // ============================================================================================ // Table State Manager helpers // ============================================================================================ - TableStateManager getTableStateManager() { + private TableStateManager getTableStateManager() { return master.getTableStateManager(); } - public boolean isTableEnabled(final TableName tableName) { + private boolean isTableEnabled(final TableName tableName) { return getTableStateManager().isTableState(tableName, TableState.State.ENABLED); } - public boolean isTableDisabled(final TableName tableName) { + private boolean isTableDisabled(final TableName tableName) { return getTableStateManager().isTableState(tableName, TableState.State.DISABLED, TableState.State.DISABLING); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java index 1edfc74179ae..9b1dfc6a23a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java @@ -176,6 +176,11 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H } } + protected boolean isTableEnabled(MasterProcedureEnv env) { + return env.getMasterServices().getTableStateManager().isTableState(getTableName(), + TableState.State.ENABLED); + } + /** * Check region is online. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 64f4bf6c84d9..892ef28ef23f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -149,7 +149,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS); break; case MODIFY_TABLE_REOPEN_ALL_REGIONS: - if (env.getAssignmentManager().isTableEnabled(getTableName())) { + if (isTableEnabled(env)) { addChildProcedure(new ReopenTableRegionsProcedure(getTableName())); } if (deleteColumnFamilyInModify) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java index 7bf834c62c8c..ffa485d5465c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java @@ -22,7 +22,6 @@ import java.util.Collections; import java.util.List; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.assignment.RegionStateNode; @@ -105,7 +104,7 @@ protected Flow executeFromState(MasterProcedureEnv env, ReopenTableRegionsState throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { switch (state) { case REOPEN_TABLE_REGIONS_GET_REGIONS: - if (!env.getAssignmentManager().isTableEnabled(tableName)) { + if (!isTableEnabled(env)) { LOG.info("Table {} is disabled, give up reopening its regions", tableName); return Flow.NO_MORE_STATE; } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 23eeb3ab740f..25b5979ae8c8 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -53,6 +53,7 @@ import="org.apache.hadoop.hbase.client.RegionLocator" import="org.apache.hadoop.hbase.client.RegionReplicaUtil" import="org.apache.hadoop.hbase.client.Table" + import="org.apache.hadoop.hbase.client.TableState" import="org.apache.hadoop.hbase.client.ColumnFamilyDescriptor" import="org.apache.hadoop.hbase.http.InfoServer" import="org.apache.hadoop.hbase.master.HMaster" @@ -647,14 +648,14 @@ Enabled - <%= master.getAssignmentManager().isTableEnabled(table.getName()) %> + <%= master.getTableStateManager().isTableState(table.getName(), TableState.State.ENABLED) %> Is the table enabled Compaction <% - if (master.getAssignmentManager().isTableEnabled(table.getName())) { + if (master.getTableStateManager().isTableState(table.getName(), TableState.State.ENABLED)) { CompactionState compactionState = master.getCompactionState(table.getName()); %><%= compactionState==null?"UNKNOWN":compactionState %><% } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java index 5e78c3dc7758..e899cee3ea47 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; import static org.mockito.ArgumentMatchers.any; + import java.io.IOException; import java.util.List; import java.util.Map; @@ -61,7 +62,9 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; @@ -105,17 +108,7 @@ public MockMasterServices(Configuration conf, null: new SplitWALManager(this); // Mock an AM. - this.assignmentManager = new AssignmentManager(this, new MockRegionStateStore(this)) { - @Override - public boolean isTableEnabled(final TableName tableName) { - return true; - } - - @Override - public boolean isTableDisabled(final TableName tableName) { - return false; - } - }; + this.assignmentManager = new AssignmentManager(this, new MockRegionStateStore(this)); this.balancer = LoadBalancerFactory.getLoadBalancer(conf); this.serverManager = new ServerManager(this); this.tableStateManager = Mockito.mock(TableStateManager.class); From dbdc37b41b070b677c0486311c670ec3d225b983 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Sun, 4 Oct 2020 16:02:12 +0530 Subject: [PATCH 416/769] HBASE-25115 HFilePrettyPrinter can't seek to the row which is the first row of a hfile Closes #2473 Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani --- .../hbase/io/hfile/HFilePrettyPrinter.java | 10 +++++----- .../io/hfile/TestHFilePrettyPrinter.java | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 93d85af677b8..02efa8e89863 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -322,16 +322,16 @@ public int processFile(Path file, boolean checkRootDir) throws IOException { // scan over file and read key/value's and check if requested HFileScanner scanner = reader.getScanner(false, false, false); fileStats = new KeyValueStatsCollector(); - boolean shouldScanKeysValues = false; - if (this.isSeekToRow) { + boolean shouldScanKeysValues; + if (this.isSeekToRow && !Bytes.equals(row, reader.getFirstRowKey().orElse(null))) { // seek to the first kv on this row - shouldScanKeysValues = - (scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != -1); + shouldScanKeysValues = (scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != -1); } else { shouldScanKeysValues = scanner.seekTo(); } - if (shouldScanKeysValues) + if (shouldScanKeysValues) { scanKeysValues(file, fileStats, scanner, row); + } } // print meta data diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java index 
8fab5a3df8d2..c7ac97aa94f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java @@ -108,4 +108,23 @@ public void testHFilePrettyPrinterRootDir() throws Exception { String expectedResult = "Scanning -> " + fileInRootDir + "\n" + "Scanned kv count -> 1000\n"; assertEquals(expectedResult, result); } + + @Test + public void testHFilePrettyPrinterSeekFirstRow() throws Exception { + Path fileNotInRootDir = UTIL.getDataTestDir("hfile"); + TestHRegionServerBulkLoad.createHFile(fs, fileNotInRootDir, cf, fam, value, 1000); + assertNotEquals("directory used is not an HBase root dir", UTIL.getDefaultRootDirPath(), + fileNotInRootDir); + + HFile.Reader reader = + HFile.createReader(fs, fileNotInRootDir, CacheConfig.DISABLED, true, conf); + String firstRowKey = new String(reader.getFirstRowKey().get()); + + System.setOut(ps); + new HFilePrettyPrinter(conf) + .run(new String[] { "-v", "-w" + firstRowKey, String.valueOf(fileNotInRootDir) }); + String result = new String(stream.toByteArray()); + String expectedResult = "Scanning -> " + fileNotInRootDir + "\n" + "Scanned kv count -> 1\n"; + assertEquals(expectedResult, result); + } } From 29bf225dd043d36a0cacdd9f6e0129d8cfb38e36 Mon Sep 17 00:00:00 2001 From: Joseph295 <517536891@qq.com> Date: Mon, 5 Oct 2020 12:39:27 +0800 Subject: [PATCH 417/769] HBASE-25048 [HBCK2] Bypassed parent procedures are not updated in store (#2410) Signed-off-by: Duo Zhang Signed-off-by: stack --- .../org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index f8857859131a..b99f544628bb 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -981,7 +981,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur while (current != null) { LOG.debug("Bypassing {}", current); current.bypass(getEnvironment()); - store.update(procedure); + store.update(current); long parentID = current.getParentProcId(); current = getProcedure(parentID); } From 758c76d5f2a58d6b74e71347e10d0fc87134722a Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 5 Oct 2020 15:37:34 +0530 Subject: [PATCH 418/769] HBASE-25147 : Serialize regionNames in ReopenTableRegionsProcedure Closes #2494 Signed-off-by: Duo Zhang --- .../server/master/MasterProcedure.proto | 1 + .../ReopenTableRegionsProcedure.java | 24 ++++++++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index b18de27a0c9a..59a1d68fda44 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -487,6 +487,7 @@ enum ReopenTableRegionsState { message ReopenTableRegionsStateData { required TableName table_name = 1; repeated RegionLocation region = 2; + repeated bytes region_names = 3; } enum InitMetaState { diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java index ffa485d5465c..aa89094501db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java @@ -36,6 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -56,19 +57,19 @@ public class ReopenTableRegionsProcedure // Specify specific regions of a table to reopen. // if specified null, all regions of the table will be reopened. - private final List<byte[]> regionNames; + private List<byte[]> regionNames; private List<HRegionLocation> regions = Collections.emptyList(); private RetryCounter retryCounter; public ReopenTableRegionsProcedure() { - regionNames = null; + regionNames = Collections.emptyList(); } public ReopenTableRegionsProcedure(TableName tableName) { this.tableName = tableName; - this.regionNames = null; + this.regionNames = Collections.emptyList(); } public ReopenTableRegionsProcedure(final TableName tableName, @@ -223,6 +224,17 @@ protected void serializeStateData(ProcedureStateSerializer serializer) throws IO ReopenTableRegionsStateData.Builder builder = ReopenTableRegionsStateData.newBuilder() .setTableName(ProtobufUtil.toProtoTableName(tableName)); regions.stream().map(ProtobufUtil::toRegionLocation).forEachOrdered(builder::addRegion); + if (CollectionUtils.isNotEmpty(regionNames)) { + // As of this writing, wrapping this statement within an if condition is only required + // for backward compatibility, as we used to have 'regionNames' as null for cases + // where all regions of a given table should be reopened. Now, we have kept emptyList() + // for 'regionNames' to indicate that all regions of a given table should be reopened unless + // 'regionNames' contains at least one specific region, in which case only the regions + // that 'regionNames' contains should be reopened, not all regions of the given table. + // Now, we don't need this check since we are no longer dealing with a null 'regionNames' and + // hence, guarding by this if condition can be removed in HBase 4.0.0.
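(To see the new regionNames wire mapping in isolation: a small round-trip sketch, not from the patch itself, using the shaded protobuf ByteString imported above. The class name and region names are illustrative.)

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

public class RegionNamesRoundTrip {
  public static void main(String[] args) {
    List<byte[]> regionNames = Arrays.asList("region-a".getBytes(), "region-b".getBytes());
    // serializeStateData direction: byte[] -> ByteString for the repeated protobuf field.
    List<ByteString> wire =
      regionNames.stream().map(ByteString::copyFrom).collect(Collectors.toList());
    // deserializeStateData direction: ByteString -> byte[] back into the procedure.
    List<byte[]> restored =
      wire.stream().map(ByteString::toByteArray).collect(Collectors.toList());
    System.out.println(restored.size()); // 2; an empty list still means "reopen all regions"
  }
}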
+ regionNames.stream().map(ByteString::copyFrom).forEachOrdered(builder::addRegionNames); + } serializer.serialize(builder.build()); } @@ -233,5 +245,11 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws tableName = ProtobufUtil.toTableName(data.getTableName()); regions = data.getRegionList().stream().map(ProtobufUtil::toRegionLocation) .collect(Collectors.toList()); + if (CollectionUtils.isNotEmpty(data.getRegionNamesList())) { + regionNames = data.getRegionNamesList().stream().map(ByteString::toByteArray) + .collect(Collectors.toList()); + } else { + regionNames = Collections.emptyList(); + } } } From 86b46e91c47dacce328966b2a56e5e52b2f317f7 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 5 Oct 2020 21:29:55 +0800 Subject: [PATCH 419/769] HBASE-25121 Refactor MetaTableAccessor.addRegionsToMeta and its usage places (#2476) Signed-off-by: stack --- .../hadoop/hbase/MetaTableAccessor.java | 87 +++++-------------- .../hadoop/hbase/CatalogFamilyFormat.java | 3 +- .../master/assignment/RegionStateStore.java | 79 +++++++++++++++++ .../hbase/master/janitor/MetaFixer.java | 15 ++-- .../procedure/CreateTableProcedure.java | 15 ++-- .../procedure/DeleteTableProcedure.java | 32 ++++--- .../procedure/ModifyTableProcedure.java | 69 +++------------ .../hbase/util/ServerRegionReplicaUtil.java | 43 ++++----- .../hadoop/hbase/HBaseTestingUtility.java | 2 +- .../hadoop/hbase/TestMetaTableAccessor.java | 58 ++----------- .../hadoop/hbase/client/TestEnableTable.java | 6 +- .../assignment/TestAssignmentManager.java | 4 +- .../assignment/TestRegionStateStore.java | 37 ++++++++ 13 files changed, 217 insertions(+), 233 deletions(-) diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index b9ec944ee9e3..7ec2a22e7833 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell.Type; import org.apache.hadoop.hbase.ClientMetaTableAccessor.QueryType; import org.apache.hadoop.hbase.client.Connection; @@ -247,7 +247,7 @@ public static Result scanByRegionEncodedName(Connection connection, String regio throws IOException { RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName)); - Scan scan = getMetaScan(connection, 1); + Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setFilter(rowFilter); try (Table table = getMetaHTable(connection); ResultScanner resultScanner = table.getScanner(scan)) { @@ -320,24 +320,23 @@ public static List getTableRegions(Connection connection, TableName * @param tableName bytes of table's name * @return configured Scan object */ - public static Scan getScanForTableName(Connection connection, TableName tableName) { + public static Scan getScanForTableName(Configuration conf, TableName tableName) { // Start key is just the table name with delimiters byte[] startKey = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION); // Stop key appends the smallest possible char to the table name byte[] stopKey = ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION); - Scan scan = getMetaScan(connection, -1); + Scan scan = getMetaScan(conf, -1); 
scan.withStartRow(startKey); scan.withStopRow(stopKey); return scan; } - private static Scan getMetaScan(Connection connection, int rowUpperLimit) { + private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { Scan scan = new Scan(); - int scannerCaching = connection.getConfiguration().getInt(HConstants.HBASE_META_SCANNER_CACHING, + int scannerCaching = conf.getInt(HConstants.HBASE_META_SCANNER_CACHING, HConstants.DEFAULT_HBASE_META_SCANNER_CACHING); - if (connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, - HConstants.DEFAULT_USE_META_REPLICAS)) { + if (conf.getBoolean(HConstants.USE_META_REPLICAS, HConstants.DEFAULT_USE_META_REPLICAS)) { scan.setConsistency(Consistency.TIMELINE); } if (rowUpperLimit > 0) { @@ -469,7 +468,7 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, final ClientMetaTableAccessor.Visitor visitor) throws IOException { int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE; - Scan scan = getMetaScan(connection, rowUpperLimit); + Scan scan = getMetaScan(connection.getConfiguration(), rowUpperLimit); for (byte[] family : type.getFamilies()) { scan.addFamily(family); @@ -525,7 +524,7 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR private static RegionInfo getClosestRegionInfo(Connection connection, @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException { byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); - Scan scan = getMetaScan(connection, 1); + Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setReversed(true); scan.withStartRow(searchRow); try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) { @@ -733,37 +732,6 @@ private static void deleteFromMetaTable(final Connection connection, final List< } } - /** - * Deletes some replica columns corresponding to replicas for the passed rows - * @param metaRows rows in hbase:meta - * @param replicaIndexToDeleteFrom the replica ID we would start deleting from - * @param numReplicasToRemove how many replicas to remove - * @param connection connection we're using to access meta table - */ - public static void removeRegionReplicasFromMeta(Set metaRows, - int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection) - throws IOException { - int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove; - for (byte[] row : metaRows) { - long now = EnvironmentEdgeManager.currentTime(); - Delete deleteReplicaLocations = new Delete(row); - for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) { - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getSeqNumColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getStartCodeColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerNameColumn(i), now); - deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getRegionStateColumn(i), now); - } - - deleteFromMetaTable(connection, deleteReplicaLocations); - } - } - public static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) 
.setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.STATE_QUALIFIER) @@ -804,22 +772,6 @@ public static void addSplitsToParent(Connection connection, RegionInfo regionInf } } - /** - * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this - * does not add its daughter's as different rows, but adds information about the daughters in the - * same row as the parent. Use - * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} if - * you want to do that. - * @param connection connection we're using - * @param regionInfo region information - * @throws IOException if problem connecting or updating meta - */ - @VisibleForTesting - public static void addRegionToMeta(Connection connection, RegionInfo regionInfo) - throws IOException { - addRegionsToMeta(connection, Collections.singletonList(regionInfo), 1); - } - /** * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is * CLOSED. @@ -845,17 +797,18 @@ public static void addRegionsToMeta(Connection connection, List regi int regionReplication, long ts) throws IOException { List puts = new ArrayList<>(); for (RegionInfo regionInfo : regionInfos) { - if (RegionReplicaUtil.isDefaultReplica(regionInfo)) { - Put put = makePutFromRegionInfo(regionInfo, ts); - // New regions are added with initial state of CLOSED. - addRegionStateToPut(put, RegionState.State.CLOSED); - // Add empty locations for region replicas so that number of replicas can be cached - // whenever the primary region is looked up from meta - for (int i = 1; i < regionReplication; i++) { - addEmptyLocation(put, i); - } - puts.add(put); + if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) { + continue; + } + Put put = makePutFromRegionInfo(regionInfo, ts); + // New regions are added with initial state of CLOSED. + addRegionStateToPut(put, RegionState.State.CLOSED); + // Add empty locations for region replicas so that number of replicas can be cached + // whenever the primary region is looked up from meta + for (int i = 1; i < regionReplication; i++) { + addEmptyLocation(put, i); } + puts.add(put); } putsToMetaTable(connection, puts); LOG.info("Added {} regions to meta.", puts.size()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java index d0ee3dc83326..16337072aa7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java @@ -311,8 +311,7 @@ public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) { * @param replicaId the replicaId of the region * @return a byte[] for state qualifier */ - @VisibleForTesting - static byte[] getRegionStateColumn(int replicaId) { + public static byte[] getRegionStateColumn(int replicaId) { return replicaId == 0 ? 
HConstants.STATE_QUALIFIER : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 935f61abd2f1..500e5ec79da0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -545,6 +547,83 @@ public void overwriteRegions(List regionInfos, int regionReplication LOG.debug("Overwritten regions: {} ", regionInfos); } + /** + * Update region replicas if necessary by adding new replica locations or removing unused region + * replicas + */ + public void updateRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) + throws IOException { + if (newReplicaCount < oldReplicaCount) { + removeRegionReplicas(tableName, oldReplicaCount, newReplicaCount); + } else if (newReplicaCount > oldReplicaCount) { + addRegionReplicas(tableName, oldReplicaCount, newReplicaCount); + } + } + + private Scan getScanForUpdateRegionReplicas(TableName tableName) { + return MetaTableAccessor.getScanForTableName(master.getConfiguration(), tableName) + .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + } + + private void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) + throws IOException { + Scan scan = getScanForUpdateRegionReplicas(tableName); + List deletes = new ArrayList<>(); + long now = EnvironmentEdgeManager.currentTime(); + try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; + } + RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); + if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { + continue; + } + Delete delete = new Delete(result.getRow()); + for (int i = newReplicaCount; i < oldReplicaCount; i++) { + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getRegionStateColumn(i), + now); + } + deletes.add(delete); + } + debugLogMutations(deletes); + metaTable.delete(deletes); + } + } + + private void addRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) + throws IOException { + Scan scan = getScanForUpdateRegionReplicas(tableName); + List puts = new ArrayList<>(); + long now = EnvironmentEdgeManager.currentTime(); + try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { + for (;;) { + 
Result result = scanner.next(); + if (result == null) { + break; + } + RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); + if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { + continue; + } + Put put = new Put(result.getRow(), now); + for (int i = oldReplicaCount; i < newReplicaCount; i++) { + MetaTableAccessor.addEmptyLocation(put, i); + } + puts.add(put); + } + debugLogMutations(puts); + metaTable.put(puts); + } + } + // ========================================================================== // Table Descriptors helpers // ========================================================================== diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java index 6f5162775da1..f9dc1ccb5aaf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java @@ -29,6 +29,7 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @@ -190,8 +192,8 @@ private static List createMetaEntries(final MasterServices masterSer // Add replicas if needed // we need to create regions with replicaIds starting from 1 - List newRegions = RegionReplicaUtil.addReplicas( - Collections.singletonList(regionInfo), 1, td.getRegionReplication()); + List newRegions = RegionReplicaUtil + .addReplicas(Collections.singletonList(regionInfo), 1, td.getRegionReplication()); // Add regions to META MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(), newRegions, @@ -199,12 +201,13 @@ private static List createMetaEntries(final MasterServices masterSer // Setup replication for region replicas if needed if (td.getRegionReplication() > 1) { - ServerRegionReplicaUtil.setupRegionReplicaReplication( - masterServices.getConfiguration()); + ServerRegionReplicaUtil.setupRegionReplicaReplication(masterServices); } - return Either., IOException>ofLeft(newRegions); + return Either., IOException> ofLeft(newRegions); } catch (IOException e) { - return Either., IOException>ofRight(e); + return Either., IOException> ofRight(e); + } catch (ReplicationException e) { + return Either., IOException> ofRight(new HBaseIOException(e)); } }) .collect(Collectors.toList()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index e7162d9b3add..3f171ee694d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableExistsException; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -363,23 +365,26 @@ protected static void moveTempDirectoryToHBaseRoot( } protected static List addTableToMeta(final MasterProcedureEnv env, - final TableDescriptor tableDescriptor, - final List regions) throws IOException { + final TableDescriptor tableDescriptor, final List regions) throws IOException { assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions; ProcedureSyncWait.waitMetaRegions(env); // Add replicas if needed // we need to create regions with replicaIds starting from 1 - List newRegions = RegionReplicaUtil.addReplicas(regions, 1, - tableDescriptor.getRegionReplication()); + List newRegions = + RegionReplicaUtil.addReplicas(regions, 1, tableDescriptor.getRegionReplication()); // Add regions to META addRegionsToMeta(env, tableDescriptor, newRegions); // Setup replication for region replicas if needed if (tableDescriptor.getRegionReplication() > 1) { - ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); + try { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices()); + } catch (ReplicationException e) { + throw new HBaseIOException(e); + } } return newRegions; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 5b118a4f37c5..9cfce0ce3632 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -40,12 +39,14 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.favored.FavoredNodesManager; +import org.apache.hadoop.hbase.filter.KeyOnlyFilter; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -357,22 +358,29 @@ protected static void deleteFromFs(final MasterProcedureEnv env, /** * There may be items for 
this table still up in hbase:meta in the case where the info:regioninfo * column was empty because of some write error. Remove ALL rows from hbase:meta that have to do - * with this table. See HBASE-12980. + * with this table. + *
* <p/>
    + * See HBASE-12980. */ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final TableName tableName) - throws IOException { - Connection connection = env.getMasterServices().getConnection(); - Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName); - try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { - List deletes = new ArrayList<>(); - try (ResultScanner resScanner = metaTable.getScanner(tableScan)) { - for (Result result : resScanner) { - deletes.add(new Delete(result.getRow())); + throws IOException { + Scan tableScan = MetaTableAccessor.getScanForTableName(env.getMasterConfiguration(), tableName) + .setFilter(new KeyOnlyFilter()); + long now = EnvironmentEdgeManager.currentTime(); + List deletes = new ArrayList<>(); + try ( + Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME); + ResultScanner scanner = metaTable.getScanner(tableScan)) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; } + deletes.add(new Delete(result.getRow(), now)); } if (!deletes.isEmpty()) { - LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " - + TableName.META_TABLE_NAME); + LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " + + TableName.META_TABLE_NAME); metaTable.delete(deletes); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 892ef28ef23f..9b29d30b9b36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Supplier; @@ -29,20 +28,15 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @@ -346,8 +340,6 @@ private static boolean isDeleteColumnFamily(TableDescriptor originalDescriptor, * Action before modifying table. 
* @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void preModify(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { @@ -357,7 +349,6 @@ private void preModify(final MasterProcedureEnv env, final ModifyTableState stat /** * Update descriptor * @param env MasterProcedureEnv - * @throws IOException **/ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor); @@ -366,7 +357,6 @@ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOExcept /** * Removes from hdfs the families that are not longer present in the new table descriptor. * @param env MasterProcedureEnv - * @throws IOException */ private void deleteFromFs(final MasterProcedureEnv env, final TableDescriptor oldTableDescriptor, final TableDescriptor newTableDescriptor) @@ -386,61 +376,28 @@ private void deleteFromFs(final MasterProcedureEnv env, /** * update replica column families if necessary. - * @param env MasterProcedureEnv - * @throws IOException */ - private void updateReplicaColumnsIfNeeded( - final MasterProcedureEnv env, - final TableDescriptor oldTableDescriptor, - final TableDescriptor newTableDescriptor) throws IOException { + private void updateReplicaColumnsIfNeeded(MasterProcedureEnv env, + TableDescriptor oldTableDescriptor, TableDescriptor newTableDescriptor) throws IOException { final int oldReplicaCount = oldTableDescriptor.getRegionReplication(); final int newReplicaCount = newTableDescriptor.getRegionReplication(); - - if (newReplicaCount < oldReplicaCount) { - Set tableRows = new HashSet<>(); - Connection connection = env.getMasterServices().getConnection(); - Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName()); - scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - - try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { - ResultScanner resScanner = metaTable.getScanner(scan); - for (Result result : resScanner) { - tableRows.add(result.getRow()); - } - MetaTableAccessor.removeRegionReplicasFromMeta( - tableRows, - newReplicaCount, - oldReplicaCount - newReplicaCount, - connection); - } - } - if (newReplicaCount > oldReplicaCount) { - Connection connection = env.getMasterServices().getConnection(); - // Get the existing table regions - List existingTableRegions = - MetaTableAccessor.getTableRegions(connection, getTableName()); - // add all the new entries to the meta table - addRegionsToMeta(env, newTableDescriptor, existingTableRegions); - if (oldReplicaCount <= 1) { - // The table has been newly enabled for replica. So check if we need to setup - // region replication - ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); + env.getAssignmentManager().getRegionStateStore().updateRegionReplicas(getTableName(), + oldReplicaCount, newReplicaCount); + if (newReplicaCount > oldReplicaCount && oldReplicaCount <= 1) { + // The table has been newly enabled for replica. 
So check if we need to setup + // region replication + try { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices()); + } catch (ReplicationException e) { + throw new HBaseIOException(e); } } } - private static void addRegionsToMeta(final MasterProcedureEnv env, - final TableDescriptor tableDescriptor, final List regionInfos) - throws IOException { - MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), regionInfos, - tableDescriptor.getRegionReplication()); - } /** * Action after modifying table. * @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void postModify(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { @@ -451,8 +408,6 @@ private void postModify(final MasterProcedureEnv env, final ModifyTableState sta * Coprocessor Action. * @param env MasterProcedureEnv * @param state the procedure state - * @throws IOException - * @throws InterruptedException */ private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyTableState state) throws IOException, InterruptedException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index b83749d9c337..fbd8d30bba66 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -22,16 +22,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.Reference; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint; import org.apache.hadoop.hbase.zookeeper.ZKConfig; @@ -155,34 +154,24 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, /** * Create replication peer for replicating to region replicas if needed. - * @param conf configuration to use - * @throws IOException + *
<p/>
    + * This method should only be called on the master side. */ - public static void setupRegionReplicaReplication(Configuration conf) throws IOException { - if (!isRegionReplicaReplicationEnabled(conf)) { + public static void setupRegionReplicaReplication(MasterServices services) + throws IOException, ReplicationException { + if (!isRegionReplicaReplicationEnabled(services.getConfiguration())) { return; } - - try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { - ReplicationPeerConfig peerConfig = null; - try { - peerConfig = admin.getReplicationPeerConfig(REGION_REPLICA_REPLICATION_PEER); - } catch (ReplicationPeerNotFoundException e) { - LOG.warn( - "Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + " not exist", - e); - } - - if (peerConfig == null) { - LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER - + " not exist. Creating..."); - peerConfig = new ReplicationPeerConfig(); - peerConfig.setClusterKey(ZKConfig.getZooKeeperClusterKey(conf)); - peerConfig.setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()); - admin.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig); - } + if (services.getReplicationPeerManager().getPeerConfig(REGION_REPLICA_REPLICATION_PEER) + .isPresent()) { + return; + } + LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + + " not exist. Creating..."); + ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(services.getConfiguration())) + .setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()).build(); + services.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig, true); } public static boolean isRegionReplicaReplicationEnabled(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index f60acd732334..528b155cb94c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -2489,7 +2489,7 @@ public List createMultiRegionsInMeta(final Configuration conf, .setStartKey(startKeys[i]) .setEndKey(startKeys[j]) .build(); - MetaTableAccessor.addRegionToMeta(getConnection(), hri); + MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1); newRegions.add(hri); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java index dc4b6a85a9b9..28ce7d8dae0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java @@ -29,9 +29,8 @@ import static org.mockito.Mockito.verify; import java.io.IOException; -import java.util.HashMap; +import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Connection; @@ -67,7 +66,6 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** * Test {@link org.apache.hadoop.hbase.MetaTableAccessor}. 
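(Condensed view of the refactored flow above, not a drop-in replacement: master-side peer setup becomes an idempotent "create only if absent" check against the master's ReplicationPeerManager, with no short-lived Connection/Admin. Identifiers are as in the patch.)

public static void setupRegionReplicaReplication(MasterServices services)
    throws IOException, ReplicationException {
  if (!isRegionReplicaReplicationEnabled(services.getConfiguration())) {
    return; // feature disabled, nothing to do
  }
  if (services.getReplicationPeerManager()
      .getPeerConfig(REGION_REPLICA_REPLICATION_PEER).isPresent()) {
    return; // peer already registered; repeated calls are no-ops
  }
  ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
      .setClusterKey(ZKConfig.getZooKeeperClusterKey(services.getConfiguration()))
      .setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName())
      .build();
  services.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig, true);
}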
@@ -143,9 +141,11 @@ void metaTask() throws Throwable { } }; MetaTask writer = new MetaTask(connection, "writer") { + @Override - void metaTask() throws Throwable { - MetaTableAccessor.addRegionToMeta(connection, regions.get(0)); + void metaTask() throws IOException { + MetaTableAccessor.addRegionsToMeta(connection, Collections.singletonList(regions.get(0)), + 1); LOG.info("Wrote " + regions.get(0).getEncodedName()); } }; @@ -377,44 +377,6 @@ public static void assertEmptyMetaLocation(Table meta, byte[] row, int replicaId assertEquals(0, startCodeCell.getValueLength()); } - @Test - public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException { - long regionId = System.currentTimeMillis(); - RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) - .setRegionId(regionId).setReplicaId(0).build(); - - Table meta = MetaTableAccessor.getMetaHTable(connection); - try { - List regionInfos = Lists.newArrayList(primary); - MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3); - MetaTableAccessor.removeRegionReplicasFromMeta(Sets.newHashSet(primary.getRegionName()), 1, 2, - connection); - Get get = new Get(primary.getRegionName()); - Result result = meta.get(get); - for (int replicaId = 0; replicaId < 3; replicaId++) { - Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerColumn(replicaId)); - Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getStartCodeColumn(replicaId)); - Cell stateCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getRegionStateColumn(replicaId)); - Cell snCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - CatalogFamilyFormat.getServerNameColumn(replicaId)); - if (replicaId == 0) { - assertNotNull(stateCell); - } else { - assertNull(serverCell); - assertNull(startCodeCell); - assertNull(stateCell); - assertNull(snCell); - } - } - } finally { - meta.close(); - } - } - @Test public void testMetaLocationForRegionReplicasIsAddedAtTableCreation() throws IOException { long regionId = System.currentTimeMillis(); @@ -434,14 +396,6 @@ public void testMetaLocationForRegionReplicasIsAddedAtTableCreation() throws IOE } } - private Map getMapOfRegionsToSeqNum(RegionInfo... 
regions) { - Map mids = new HashMap<>(regions.length); - for (RegionInfo region : regions) { - mids.put(region, -1L); - } - return mids; - } - @Test public void testMetaScanner() throws Exception { LOG.info("Starting " + name.getMethodName()); @@ -454,7 +408,7 @@ public void testMetaScanner() throws Exception { UTIL.createTable(tableName, FAMILY, SPLIT_KEYS); Table table = connection.getTable(tableName); // Make sure all the regions are deployed - UTIL.countRows(table); + HBaseTestingUtility.countRows(table); ClientMetaTableAccessor.Visitor visitor = mock(ClientMetaTableAccessor.Visitor.class); doReturn(true).when(visitor).visit((Result) anyObject()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java index 166fade9b654..25e8be246448 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java @@ -94,7 +94,7 @@ public void testDeleteForSureClearsAllTableRowsFromMeta() // content from a few of the rows. try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { try (ResultScanner scanner = metaTable.getScanner( - MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName))) { + MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) { for (Result result : scanner) { // Just delete one row. Delete d = new Delete(result.getRow()); @@ -114,8 +114,8 @@ public void testDeleteForSureClearsAllTableRowsFromMeta() fail("Got an exception while deleting " + tableName); } int rowCount = 0; - try (ResultScanner scanner = metaTable - .getScanner(MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName))) { + try (ResultScanner scanner = metaTable.getScanner( + MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) { for (Result result : scanner) { LOG.info("Found when none expected: " + result); rowCount++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java index 0f4e97fd7532..b7dd87b54e0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import java.util.Collections; import java.util.concurrent.Executors; import java.util.concurrent.Future; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -299,7 +300,8 @@ public void testLoadRegionFromMetaAfterRegionManuallyAdded() throws Exception { RegionInfo hri = createRegionInfo(tableName, 1); assertNull("RegionInfo was just instantiated by the test, but " + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri)); - MetaTableAccessor.addRegionToMeta(this.util.getConnection(), hri); + MetaTableAccessor.addRegionsToMeta(this.util.getConnection(), Collections.singletonList(hri), + 1); assertNull("RegionInfo was manually added in META, but " + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri)); hri = am.loadRegionFromMeta(hri.getEncodedName()); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java index 05451260a9be..a53771d46c7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java @@ -394,4 +394,41 @@ public void testAddMergeRegions() throws IOException { previousQualifier = qualifier; } } + + @Test + public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException { + long regionId = System.currentTimeMillis(); + TableName tableName = name.getTableName(); + RegionInfo primary = RegionInfoBuilder.newBuilder(tableName) + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) + .setRegionId(regionId).setReplicaId(0).build(); + + try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) { + List regionInfos = Lists.newArrayList(primary); + MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3); + final RegionStateStore regionStateStore = + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + regionStateStore.updateRegionReplicas(tableName, 3, 1); + Get get = new Get(primary.getRegionName()); + Result result = meta.get(get); + for (int replicaId = 0; replicaId < 3; replicaId++) { + Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getServerColumn(replicaId)); + Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getStartCodeColumn(replicaId)); + Cell stateCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getRegionStateColumn(replicaId)); + Cell snCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getServerNameColumn(replicaId)); + if (replicaId == 0) { + assertNotNull(stateCell); + } else { + assertNull(serverCell); + assertNull(startCodeCell); + assertNull(stateCell); + assertNull(snCell); + } + } + } + } } From 3828803833965a049a09c8cd5e35d6e88c3977cf Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Mon, 5 Oct 2020 11:51:59 -0700 Subject: [PATCH 420/769] HBASE-25143 Remove branch-1.3 from precommit and docs (#2491) Following the announcement [0] to EOL branch-1.3, update the precommit script to not consider this branch any longer, and refresh mentions of this branch in the doc. [0]: https://lists.apache.org/thread.html/r9552e9085aaac2a43f8b26b866d34825a84a9be7f19118ac560d14de%40%3Cuser.hbase.apache.org%3E Signed-off-by: Viraj Jasani Signed-off-by: Jan Hentschel --- dev-support/hbase-personality.sh | 9 +-------- src/main/asciidoc/_chapters/community.adoc | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 6f1355cf31a1..d9d11a83befa 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -553,14 +553,7 @@ function hadoopcheck_rebuild # All supported Hadoop versions that we want to test the compilation with # See the Hadoop section on prereqs in the HBase Reference Guide - if [[ "${PATCH_BRANCH}" = branch-1.3 ]]; then - yetus_info "Setting Hadoop 2 versions to test based on branch-1.3 rules." 
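(Back on the region-replica meta columns asserted in the TestRegionStateStore change above: a tiny illustrative sketch, not from the patch, of how per-replica qualifiers are derived. The '_' delimiter and four-digit hex suffix follow CatalogFamilyFormat and RegionInfo.REPLICA_ID_FORMAT as best understood; treat the helper as hypothetical.)

// Replica 0 keeps the bare qualifier; higher replica ids get '_' plus a
// four-digit hex id, e.g. "server" vs "server_0002", "state" vs "state_0001".
static String replicaColumn(String base, int replicaId) {
  return replicaId == 0 ? base : base + '_' + String.format("%04X", replicaId);
}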
- if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.7" - else - hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7" - fi - elif [[ "${PATCH_BRANCH}" = branch-1.4 ]]; then + if [[ "${PATCH_BRANCH}" = branch-1.4 ]]; then yetus_info "Setting Hadoop 2 versions to test based on branch-1.4 rules." if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then hbase_hadoop2_versions="2.7.7" diff --git a/src/main/asciidoc/_chapters/community.adoc b/src/main/asciidoc/_chapters/community.adoc index 339fa6e90b7c..3db648238a05 100644 --- a/src/main/asciidoc/_chapters/community.adoc +++ b/src/main/asciidoc/_chapters/community.adoc @@ -43,13 +43,18 @@ See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts [[hbase.fix.version.in.jira]] .How to set fix version in JIRA on issue resolve -Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set versions in JIRA when we resolve an issue. -If master is going to be 2.0.0, and branch-1 1.4.0 then: - -* Commit only to master: Mark with 2.0.0 -* Commit to branch-1 and master: Mark with 2.0.0, and 1.4.0 -* Commit to branch-1.3, branch-1, and master: Mark with 2.0.0, 1.4.0, and 1.3.x -* Commit site fixes: no version +Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set versions in JIRA when we +resolve an issue. If master is going to be 3.0.0, branch-2 will be 2.4.0, and branch-1 will be +1.7.0, then: + +* Commit only to master (i.e., backward-incompatible new feature): Mark with 3.0.0 +* Commit only to master and branch-2 (i.e., backward-compatible new feature, applicable only to + 2.x+): Mark with 3.0.0 and 2.4.0 +* Commit to master, branch-2, and branch-1 (i.e., backward-compatible new feature, applicable + everywhere): Mark with 3.0.0, 2.4.0, and 1.7.0 +* Commit to master, branch-2, branch-2.3, branch-2.2, branch-1, and branch-1.4 (i.e., bug fix + applicable to all active release lines): Mark with 3.0.0, 2.4.0, 2.3.x, 2.2.x, 1.7.0, and 1.4.x +* Commit a fix to the website: no version [[hbase.when.to.close.jira]] .Policy on when to set a RESOLVED JIRA as CLOSED From f9d88245f0c8dbb327b7f3a4178a4366c1b8bbea Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 6 Oct 2020 08:35:16 +0800 Subject: [PATCH 421/769] HBASE-25154 Set java.io.tmpdir to project build directory to avoid writing std*deferred files to /tmp (#2502) Signed-off-by: stack Signed-off-by: Viraj Jasani Signed-off-by: Sean Busbey --- pom.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pom.xml b/pom.xml index 088a7e4898e4..75fa9ceb3612 100755 --- a/pom.xml +++ b/pom.xml @@ -761,6 +761,7 @@ ${test.output.tofile} ${test.build.classes} + ${test.tmp.dir} ${project.build.directory}/test-classes + ${project.build.directory} yyyy-MM-dd'T'HH:mm:ss'Z' From 3c50c43c5c3515a30d8d6a82d63357881a9278c2 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Thu, 8 Oct 2020 17:04:48 +0900 Subject: [PATCH 428/769] HBASE-25160 Refactor AccessController and VisibilityController (#2506) Signed-off-by: stack --- .../security/access/AccessController.java | 66 ++++--------- .../visibility/VisibilityController.java | 66 +------------------ 2 files changed, 13 insertions(+), 119 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index cb664bb2d2f8..3779903f869a 100644 ---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -429,7 +429,6 @@ private enum OpType { DELETE("delete"), CHECK_AND_PUT("checkAndPut"), CHECK_AND_DELETE("checkAndDelete"), - INCREMENT_COLUMN_VALUE("incrementColumnValue"), APPEND("append"), INCREMENT("increment"); @@ -1503,18 +1502,27 @@ public void preBatchMutate(ObserverContext c, // We have a failure with table, cf and q perm checks and now giving a chance for cell // perm check OpType opType; + long timestamp; if (m instanceof Put) { checkForReservedTagPresence(user, m); opType = OpType.PUT; + timestamp = m.getTimestamp(); } else if (m instanceof Delete) { opType = OpType.DELETE; + timestamp = m.getTimestamp(); + } else if (m instanceof Increment) { + opType = OpType.INCREMENT; + timestamp = ((Increment) m).getTimeRange().getMax(); + } else if (m instanceof Append) { + opType = OpType.APPEND; + timestamp = ((Append) m).getTimeRange().getMax(); } else { - // If the operation type is not Put or Delete, do nothing + // If the operation type is not Put/Delete/Increment/Append, do nothing continue; } AuthResult authResult = null; if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), - m.getFamilyCellMap(), m.getTimestamp(), Action.WRITE)) { + m.getFamilyCellMap(), timestamp, Action.WRITE)) { authResult = AuthResult.allow(opType.toString(), "Covering cell set", user, Action.WRITE, table, m.getFamilyCellMap()); } else { @@ -1695,32 +1703,6 @@ public Result preAppend(ObserverContext c, Append return null; } - @Override - public Result preAppendAfterRowLock(final ObserverContext c, - final Append append) throws IOException { - if (append.getAttribute(CHECK_COVERING_PERM) != null) { - // We had failure with table, cf and q perm checks and now giving a chance for cell - // perm check - TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); - AuthResult authResult = null; - User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.APPEND, c.getEnvironment(), append.getRow(), - append.getFamilyCellMap(), append.getTimeRange().getMax(), Action.WRITE)) { - authResult = AuthResult.allow(OpType.APPEND.toString(), - "Covering cell set", user, Action.WRITE, table, append.getFamilyCellMap()); - } else { - authResult = AuthResult.deny(OpType.APPEND.toString(), - "Covering cell set", user, Action.WRITE, table, append.getFamilyCellMap()); - } - AccessChecker.logResult(authResult); - if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); - } - } - return null; - } - @Override public Result preIncrement(final ObserverContext c, final Increment increment) @@ -1756,32 +1738,6 @@ public Result preIncrement(final ObserverContext c return null; } - @Override - public Result preIncrementAfterRowLock(final ObserverContext c, - final Increment increment) throws IOException { - if (increment.getAttribute(CHECK_COVERING_PERM) != null) { - // We had failure with table, cf and q perm checks and now giving a chance for cell - // perm check - TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); - AuthResult authResult = null; - User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.INCREMENT, c.getEnvironment(), increment.getRow(), - increment.getFamilyCellMap(), increment.getTimeRange().getMax(), Action.WRITE)) { - authResult = 
AuthResult.allow(OpType.INCREMENT.toString(), "Covering cell set", - user, Action.WRITE, table, increment.getFamilyCellMap()); - } else { - authResult = AuthResult.deny(OpType.INCREMENT.toString(), "Covering cell set", - user, Action.WRITE, table, increment.getFamilyCellMap()); - } - AccessChecker.logResult(authResult); - if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); - } - } - return null; - } - @Override public List> postIncrementBeforeWAL( ObserverContext ctx, Mutation mutation, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 02ed4dd1df0b..37f25a83ea72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -45,11 +45,9 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -69,7 +67,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.FilterList; @@ -323,7 +320,7 @@ public void preBatchMutate(ObserverContext c, } } } - if (!sanityFailure) { + if (!sanityFailure && (m instanceof Put || m instanceof Delete)) { if (cellVisibility != null) { String labelsExp = cellVisibility.getExpression(); List visibilityTags = labelCache.get(labelsExp); @@ -360,7 +357,7 @@ public void preBatchMutate(ObserverContext c, if (m instanceof Put) { Put p = (Put) m; p.add(cell); - } else if (m instanceof Delete) { + } else { Delete d = (Delete) m; d.add(cell); } @@ -470,35 +467,6 @@ private Pair checkForReservedVisibilityTagPresence(Cell cell, return pair; } - /** - * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This - * tag type is reserved and should not be explicitly set by user. There are - * two versions of this method one that accepts pair and other without pair. - * In case of preAppend and preIncrement the additional operations are not - * needed like checking for STRING_VIS_TAG_TYPE and hence the API without pair - * could be used. - * - * @param cell - * @throws IOException - */ - private boolean checkForReservedVisibilityTagPresence(Cell cell) throws IOException { - // Bypass this check when the operation is done by a system/super user. 
- // This is done because, while Replication, the Cells coming to the peer - // cluster with reserved - // typed tags and this is fine and should get added to the peer cluster - // table - if (isSystemOrSuperUser()) { - return true; - } - Iterator tagsItr = PrivateCellUtil.tagsIterator(cell); - while (tagsItr.hasNext()) { - if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) { - return false; - } - } - return true; - } - private void removeReplicationVisibilityTag(List tags) throws IOException { Iterator iterator = tags.iterator(); while (iterator.hasNext()) { @@ -657,36 +625,6 @@ private boolean isSystemOrSuperUser() throws IOException { return Superusers.isSuperUser(VisibilityUtils.getActiveUser()); } - @Override - public Result preAppend(ObserverContext e, Append append) - throws IOException { - // If authorization is not enabled, we don't care about reserved tags - if (!authorizationEnabled) { - return null; - } - for (CellScanner cellScanner = append.cellScanner(); cellScanner.advance();) { - if (!checkForReservedVisibilityTagPresence(cellScanner.current())) { - throw new FailedSanityCheckException("Append contains cell with reserved type tag"); - } - } - return null; - } - - @Override - public Result preIncrement(ObserverContext e, Increment increment) - throws IOException { - // If authorization is not enabled, we don't care about reserved tags - if (!authorizationEnabled) { - return null; - } - for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance();) { - if (!checkForReservedVisibilityTagPresence(cellScanner.current())) { - throw new FailedSanityCheckException("Increment contains cell with reserved type tag"); - } - } - return null; - } - @Override public List> postIncrementBeforeWAL( ObserverContext ctx, Mutation mutation, From c7248a4a06fcfb5bb72d205694b0584454bfc987 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 8 Oct 2020 21:35:29 +0800 Subject: [PATCH 429/769] HBASE-25124 Support changing region replica count without disabling table (#2497) Signed-off-by: stack --- .../server/master/MasterProcedure.proto | 2 + .../master/assignment/AssignmentManager.java | 59 ++++--- .../master/assignment/RegionStateStore.java | 41 +---- .../procedure/EnableTableProcedure.java | 53 +----- .../procedure/ModifyTableProcedure.java | 72 +++++--- .../hadoop/hbase/HBaseTestingUtility.java | 2 - .../hadoop/hbase/client/TestAdmin3.java | 20 --- .../assignment/TestRegionStateStore.java | 2 +- .../procedure/TestModifyTableProcedure.java | 4 +- .../TestRegionReplicasWithModifyTable.java | 155 ++++++------------ 10 files changed, 157 insertions(+), 253 deletions(-) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 59a1d68fda44..8d8b9af009cd 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -72,6 +72,8 @@ enum ModifyTableState { MODIFY_TABLE_DELETE_FS_LAYOUT = 5; MODIFY_TABLE_POST_OPERATION = 6; MODIFY_TABLE_REOPEN_ALL_REGIONS = 7; + MODIFY_TABLE_CLOSE_EXCESS_REPLICAS = 8; + MODIFY_TABLE_ASSIGN_NEW_REPLICAS = 9; } message ModifyTableStateData { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index d2e1eb3e9d42..fb64514a3377 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -870,32 +870,47 @@ private TransitRegionStateProcedure[] createAssignProcedures( .sorted(AssignmentManager::compare).toArray(TransitRegionStateProcedure[]::new); } + // for creating unassign TRSP when disabling a table or closing excess region replicas + private TransitRegionStateProcedure forceCreateUnssignProcedure(RegionStateNode regionNode) { + regionNode.lock(); + try { + if (!regionStates.include(regionNode, false) || + regionStates.isRegionOffline(regionNode.getRegionInfo())) { + return null; + } + // As in DisableTableProcedure or ModifyTableProcedure, we will hold the xlock for table, so + // we can make sure that this procedure has not been executed yet, as TRSP will hold the + // shared lock for table all the time. So here we will unset it and when it is actually + // executed, it will find that the attach procedure is not itself and quit immediately. + if (regionNode.getProcedure() != null) { + regionNode.unsetProcedure(regionNode.getProcedure()); + } + return regionNode.setProcedure(TransitRegionStateProcedure.unassign(getProcedureEnvironment(), + regionNode.getRegionInfo())); + } finally { + regionNode.unlock(); + } + } + /** * Called by DisableTableProcedure to unassign all the regions for a table. */ public TransitRegionStateProcedure[] createUnassignProceduresForDisabling(TableName tableName) { - return regionStates.getTableRegionStateNodes(tableName).stream().map(regionNode -> { - regionNode.lock(); - try { - if (!regionStates.include(regionNode, false) || - regionStates.isRegionOffline(regionNode.getRegionInfo())) { - return null; - } - // As in DisableTableProcedure, we will hold the xlock for table, so we can make sure that - // this procedure has not been executed yet, as TRSP will hold the shared lock for table all - // the time. So here we will unset it and when it is actually executed, it will find that - // the attach procedure is not itself and quit immediately. - if (regionNode.getProcedure() != null) { - regionNode.unsetProcedure(regionNode.getProcedure()); - } - TransitRegionStateProcedure proc = TransitRegionStateProcedure - .unassign(getProcedureEnvironment(), regionNode.getRegionInfo()); - regionNode.setProcedure(proc); - return proc; - } finally { - regionNode.unlock(); - } - }).filter(p -> p != null).toArray(TransitRegionStateProcedure[]::new); + return regionStates.getTableRegionStateNodes(tableName).stream() + .map(this::forceCreateUnssignProcedure).filter(p -> p != null) + .toArray(TransitRegionStateProcedure[]::new); + } + + /** + * Called by ModifyTableProcedures to unassign all the excess region replicas + * for a table. 
+ */ + public TransitRegionStateProcedure[] createUnassignProceduresForClosingExcessRegionReplicas( + TableName tableName, int newReplicaCount) { + return regionStates.getTableRegionStateNodes(tableName).stream() + .filter(regionNode -> regionNode.getRegionInfo().getReplicaId() >= newReplicaCount) + .map(this::forceCreateUnssignProcedure).filter(p -> p != null) + .toArray(TransitRegionStateProcedure[]::new); } public SplitTableRegionProcedure createSplitProcedure(final RegionInfo regionToSplit, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 500e5ec79da0..78f2bb75fe8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -547,25 +547,12 @@ public void overwriteRegions(List regionInfos, int regionReplication LOG.debug("Overwritten regions: {} ", regionInfos); } - /** - * Update region replicas if necessary by adding new replica locations or removing unused region - * replicas - */ - public void updateRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) - throws IOException { - if (newReplicaCount < oldReplicaCount) { - removeRegionReplicas(tableName, oldReplicaCount, newReplicaCount); - } else if (newReplicaCount > oldReplicaCount) { - addRegionReplicas(tableName, oldReplicaCount, newReplicaCount); - } - } - private Scan getScanForUpdateRegionReplicas(TableName tableName) { return MetaTableAccessor.getScanForTableName(master.getConfiguration(), tableName) .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); } - private void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) + public void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) throws IOException { Scan scan = getScanForUpdateRegionReplicas(tableName); List deletes = new ArrayList<>(); @@ -598,32 +585,6 @@ private void removeRegionReplicas(TableName tableName, int oldReplicaCount, int } } - private void addRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) - throws IOException { - Scan scan = getScanForUpdateRegionReplicas(tableName); - List puts = new ArrayList<>(); - long now = EnvironmentEdgeManager.currentTime(); - try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { - for (;;) { - Result result = scanner.next(); - if (result == null) { - break; - } - RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); - if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { - continue; - } - Put put = new Put(result.getRow(), now); - for (int i = oldReplicaCount; i < newReplicaCount; i++) { - MetaTableAccessor.addEmptyLocation(put, i); - } - puts.add(put); - } - debugLogMutations(puts); - metaTable.put(puts); - } - } - // ========================================================================== // Table Descriptors helpers // ========================================================================== diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index 6ca83fe01efe..1e48981e417c 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -20,17 +20,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.CatalogFamilyFormat; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -100,7 +94,6 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS case ENABLE_TABLE_MARK_REGIONS_ONLINE: // Get the region replica count. If changed since disable, need to do // more work assigning. - Connection connection = env.getMasterServices().getConnection(); TableDescriptor tableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); int configuredReplicaCount = tableDescriptor.getRegionReplication(); @@ -111,25 +104,16 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS // How many replicas do we currently have? Check regions returned from // in-memory state. int currentMaxReplica = getMaxReplicaId(regionsOfTable); - - // Read the META table to know the number of replicas the table currently has. - // If there was a table modification on region replica count then need to - // adjust replica counts here. - int replicasFound = TableName.isMetaTableName(this.tableName)? - 0: // TODO: Figure better what to do here for hbase:meta replica. - getReplicaCountInMeta(connection, configuredReplicaCount, regionsOfTable); - LOG.info("replicasFound={} (configuredReplicaCount={} for {}", replicasFound, - configuredReplicaCount, tableName.getNameAsString()); - if (currentMaxReplica == (configuredReplicaCount - 1)) { - if (LOG.isDebugEnabled()) { - LOG.debug("No change in number of region replicas (configuredReplicaCount={});" - + " assigning.", configuredReplicaCount); - } + if (currentMaxReplica == configuredReplicaCount - 1) { + LOG.debug("No change in number of region replicas (configuredReplicaCount={});" + + " assigning.", configuredReplicaCount); } else if (currentMaxReplica > (configuredReplicaCount - 1)) { // We have additional regions as the replica count has been decreased. 
Delete // those regions because already the table is in the unassigned state - LOG.info("The number of replicas " + (currentMaxReplica + 1) - + " is more than the region replica count " + configuredReplicaCount); + LOG.warn( + "The number of replicas {} is more than the region replica count {}" + + ", usually this should not happen as we will delete them in ModifyTableProcedure", + currentMaxReplica + 1, configuredReplicaCount); List copyOfRegions = new ArrayList(regionsOfTable); for (RegionInfo regionInfo : copyOfRegions) { if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) { @@ -140,11 +124,11 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS regionsOfTable.remove(regionInfo); } } - } else { + } else if (currentMaxReplica < configuredReplicaCount - 1) { // the replicasFound is less than the regionReplication LOG.info("Number of replicas has increased for {}. Assigning new region replicas." + "The previous replica count was {}. The current replica count is {}.", - this.tableName, (currentMaxReplica + 1), configuredReplicaCount); + this.tableName, currentMaxReplica + 1, configuredReplicaCount); regionsOfTable = RegionReplicaUtil.addReplicas(regionsOfTable, currentMaxReplica + 1, configuredReplicaCount); } @@ -174,25 +158,6 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS return Flow.HAS_MORE_STATE; } - /** - * @return Count of replicas found reading hbase:meta Region row or zk if - * asking about the hbase:meta table itself.. - */ - private int getReplicaCountInMeta(Connection connection, int regionReplicaCount, - List regionsOfTable) throws IOException { - Result r = MetaTableAccessor.getCatalogFamilyRow(connection, regionsOfTable.get(0)); - int replicasFound = 0; - for (int i = 1; i < regionReplicaCount; i++) { - // Since we have already added the entries to the META we will be getting only that here - List columnCells = - r.getColumnCells(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i)); - if (!columnCells.isEmpty()) { - replicasFound++; - } - } - return replicasFound; - } - @Override protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 9b29d30b9b36..beb129b6f52b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -24,16 +24,17 @@ import java.util.List; import java.util.Set; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import org.apache.hadoop.hbase.ConcurrentTableModificationException; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.replication.ReplicationException; @@ -128,6 +129,12 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS break; case MODIFY_TABLE_PRE_OPERATION: preModify(env, state); + setNextState(ModifyTableState.MODIFY_TABLE_CLOSE_EXCESS_REPLICAS); + break; + case MODIFY_TABLE_CLOSE_EXCESS_REPLICAS: + if (isTableEnabled(env)) { + closeExcessReplicasIfNeeded(env); + } setNextState(ModifyTableState.MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR); break; case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR: @@ -135,7 +142,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN); break; case MODIFY_TABLE_REMOVE_REPLICA_COLUMN: - updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor); + removeReplicaColumnsIfNeeded(env); setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION); break; case MODIFY_TABLE_POST_OPERATION: @@ -146,6 +153,10 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS if (isTableEnabled(env)) { addChildProcedure(new ReopenTableRegionsProcedure(getTableName())); } + setNextState(ModifyTableState.MODIFY_TABLE_ASSIGN_NEW_REPLICAS); + break; + case MODIFY_TABLE_ASSIGN_NEW_REPLICAS: + assignNewReplicasIfNeeded(env); if (deleteColumnFamilyInModify) { setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT); } else { @@ -297,14 +308,6 @@ private void prepareModify(final MasterProcedureEnv env) throws IOException { env.getMasterServices().getTableDescriptors().get(getTableName()); } - if (env.getMasterServices().getTableStateManager() - .isTableState(getTableName(), TableState.State.ENABLED)) { - if (modifiedTableDescriptor.getRegionReplication() != unmodifiedTableDescriptor - .getRegionReplication()) { - throw new TableNotDisabledException( - "REGION_REPLICATION change is not supported for enabled tables"); - } - } this.deleteColumnFamilyInModify = isDeleteColumnFamily(unmodifiedTableDescriptor, modifiedTableDescriptor); if (!unmodifiedTableDescriptor.getRegionServerGroup() @@ -375,17 +378,36 @@ private void deleteFromFs(final MasterProcedureEnv env, } /** - * update replica column families if necessary. + * remove replica columns if necessary. */ - private void updateReplicaColumnsIfNeeded(MasterProcedureEnv env, - TableDescriptor oldTableDescriptor, TableDescriptor newTableDescriptor) throws IOException { - final int oldReplicaCount = oldTableDescriptor.getRegionReplication(); - final int newReplicaCount = newTableDescriptor.getRegionReplication(); - env.getAssignmentManager().getRegionStateStore().updateRegionReplicas(getTableName(), + private void removeReplicaColumnsIfNeeded(MasterProcedureEnv env) throws IOException { + final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication(); + final int newReplicaCount = modifiedTableDescriptor.getRegionReplication(); + if (newReplicaCount >= oldReplicaCount) { + return; + } + env.getAssignmentManager().getRegionStateStore().removeRegionReplicas(getTableName(), oldReplicaCount, newReplicaCount); - if (newReplicaCount > oldReplicaCount && oldReplicaCount <= 1) { - // The table has been newly enabled for replica. 
So check if we need to setup - // region replication + env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName()).stream() + .filter(r -> r.getReplicaId() >= newReplicaCount) + .forEach(env.getAssignmentManager().getRegionStates()::deleteRegion); + } + + private void assignNewReplicasIfNeeded(MasterProcedureEnv env) throws IOException { + final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication(); + final int newReplicaCount = modifiedTableDescriptor.getRegionReplication(); + if (newReplicaCount <= oldReplicaCount) { + return; + } + if (isTableEnabled(env)) { + List newReplicas = env.getAssignmentManager().getRegionStates() + .getRegionsOfTable(getTableName()).stream().filter(RegionReplicaUtil::isDefaultReplica) + .flatMap(primaryRegion -> IntStream.range(oldReplicaCount, newReplicaCount).mapToObj( + replicaId -> RegionReplicaUtil.getRegionInfoForReplica(primaryRegion, replicaId))) + .collect(Collectors.toList()); + addChildProcedure(env.getAssignmentManager().createAssignProcedures(newReplicas)); + } + if (oldReplicaCount <= 1) { try { ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices()); } catch (ReplicationException e) { @@ -394,6 +416,16 @@ private void updateReplicaColumnsIfNeeded(MasterProcedureEnv env, } } + private void closeExcessReplicasIfNeeded(MasterProcedureEnv env) { + final int oldReplicaCount = unmodifiedTableDescriptor.getRegionReplication(); + final int newReplicaCount = modifiedTableDescriptor.getRegionReplication(); + if (newReplicaCount >= oldReplicaCount) { + return; + } + addChildProcedure(env.getAssignmentManager() + .createUnassignProceduresForClosingExcessRegionReplicas(getTableName(), newReplicaCount)); + } + /** * Action after modifying table. * @param env MasterProcedureEnv diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 528b155cb94c..1fb2f00d67cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1846,11 +1846,9 @@ public static void modifyTableSync(Admin admin, TableDescriptor desc) */ public static void setReplicas(Admin admin, TableName table, int replicaCount) throws IOException, InterruptedException { - admin.disableTable(table); TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table)) .setRegionReplication(replicaCount).build(); admin.modifyTable(desc); - admin.enableTable(table); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java index c648db94aa39..441d40194a9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java @@ -263,26 +263,6 @@ public void testReadOnlyTableModify() throws IOException, InterruptedException { assertFalse(ADMIN.tableExists(tableName)); } - @Test(expected = TableNotDisabledException.class) - public void testModifyRegionReplicasEnabledTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - - // Modify region replication count - TableDescriptor htd = TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)) - .setRegionReplication(3).build(); - try { - 
// try to modify the region replication count without disabling the table - ADMIN.modifyTable(htd); - fail("Expected an exception"); - } finally { - // Delete the table - ADMIN.disableTable(tableName); - ADMIN.deleteTable(tableName); - assertFalse(ADMIN.tableExists(tableName)); - } - } - @Test public void testDeleteLastColumnFamily() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java index a53771d46c7b..ad1340104a98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java @@ -408,7 +408,7 @@ public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws I MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3); final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); - regionStateStore.updateRegionReplicas(tableName, 3, 1); + regionStateStore.removeRegionReplicas(tableName, 3, 1); Get get = new Get(primary.getRegionName()); Result result = meta.get(get); for (int replicaId = 0; replicaId < 3; replicaId++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java index 4461f2f0ede4..f5cc543e86d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java @@ -371,7 +371,7 @@ public void testRollbackAndDoubleExecutionOnline() throws Exception { long procId = procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), newTd)); - int lastStep = 3; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR + int lastStep = 8; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep); // cf2 should not be present @@ -404,7 +404,7 @@ public void testRollbackAndDoubleExecutionOffline() throws Exception { new ModifyTableProcedure(procExec.getEnvironment(), newTd)); // Restart the executor and rollback the step twice - int lastStep = 3; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR + int lastStep = 8; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep); // cf2 should not be present diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java index f01038eee70c..4ca8059bc285 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java @@ -20,51 +20,64 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; +import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.RegionSplitter; +import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +@RunWith(Parameterized.class) @Category({ RegionServerTests.class, MediumTests.class }) public class TestRegionReplicasWithModifyTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionReplicasWithModifyTable.class); + HBaseClassTestRule.forClass(TestRegionReplicasWithModifyTable.class); private static final int NB_SERVERS = 3; - private static Table table; private static final HBaseTestingUtility HTU = new HBaseTestingUtility(); private static final byte[] f = HConstants.CATALOG_FAMILY; + @Parameter + public boolean disableBeforeModifying; + @Rule - public TestName name = new TestName(); + public TableNameTestRule name = new TableNameTestRule(); + + @Parameters + public static List params() { + return Arrays.asList(new Object[] { true }, new Object[] { false }); + } @BeforeClass public static void before() throws Exception { HTU.startMiniCluster(NB_SERVERS); } - private static void enableReplicationByModification(final TableName tableName, - boolean withReplica, int initialReplicaCount, int enableReplicaCount, int splitCount) - throws IOException, InterruptedException { + private void enableReplicationByModification(boolean withReplica, int initialReplicaCount, + int enableReplicaCount, int splitCount) throws IOException, InterruptedException { + TableName tableName = name.getTableName(); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); if (withReplica) { builder.setRegionReplication(initialReplicaCount); @@ -72,14 +85,25 @@ private static void enableReplicationByModification(final TableName tableName, TableDescriptor htd = builder.build(); if (splitCount > 0) { byte[][] splits = getSplits(splitCount); - table = HTU.createTable(htd, new byte[][] { f }, splits, - new Configuration(HTU.getConfiguration())); - + HTU.createTable(htd, new byte[][] { f }, splits, new Configuration(HTU.getConfiguration())); } else { - table = HTU.createTable(htd, new byte[][] { f }, (byte[][]) null, + HTU.createTable(htd, new byte[][] { f }, (byte[][]) null, new Configuration(HTU.getConfiguration())); } - HBaseTestingUtility.setReplicas(HTU.getAdmin(), table.getName(), enableReplicaCount); + if (disableBeforeModifying) { + HTU.getAdmin().disableTable(tableName); + } + HBaseTestingUtility.setReplicas(HTU.getAdmin(), tableName, enableReplicaCount); + if (disableBeforeModifying) { + HTU.getAdmin().enableTable(tableName); + } + int expectedRegionCount; + if (splitCount > 0) { + expectedRegionCount = enableReplicaCount * splitCount; + } else { + 
expectedRegionCount = enableReplicaCount; + } + assertTotalRegions(expectedRegionCount); } private static byte[][] getSplits(int numRegions) { @@ -91,123 +115,50 @@ private static byte[][] getSplits(int numRegions) { @AfterClass public static void afterClass() throws Exception { - HRegionServer.TEST_SKIP_REPORTING_TRANSITION = false; - table.close(); HTU.shutdownMiniCluster(); } - private HRegionServer getRS() { - return HTU.getMiniHBaseCluster().getRegionServer(0); - } - - private HRegionServer getSecondaryRS() { - return HTU.getMiniHBaseCluster().getRegionServer(1); + @After + public void tearDown() throws IOException { + TableName tableName = name.getTableName(); + HTU.getAdmin().disableTable(tableName); + HTU.getAdmin().deleteTable(tableName); } - private HRegionServer getTertiaryRS() { - return HTU.getMiniHBaseCluster().getRegionServer(2); + private void assertTotalRegions(int expected) { + int actual = HTU.getHBaseCluster().getRegions(name.getTableName()).size(); + assertEquals(expected, actual); } @Test public void testRegionReplicasUsingEnableTable() throws Exception { - TableName tableName = null; - try { - tableName = TableName.valueOf(name.getMethodName()); - enableReplicationByModification(tableName, false, 0, 3, 0); - List onlineRegions = getRS().getRegions(tableName); - List onlineRegions2 = getSecondaryRS().getRegions(tableName); - List onlineRegions3 = getTertiaryRS().getRegions(tableName); - int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be more than 1", 3, totalRegions); - } finally { - disableAndDeleteTable(tableName); - } - } - - private void disableAndDeleteTable(TableName tableName) throws IOException { - HTU.getAdmin().disableTable(tableName); - HTU.getAdmin().deleteTable(tableName); + enableReplicationByModification(false, 0, 3, 0); } @Test public void testRegionReplicasUsingEnableTableForMultipleRegions() throws Exception { - TableName tableName = null; - try { - tableName = TableName.valueOf(name.getMethodName()); - enableReplicationByModification(tableName, false, 0, 3, 10); - List onlineRegions = getRS().getRegions(tableName); - List onlineRegions2 = getSecondaryRS().getRegions(tableName); - List onlineRegions3 = getTertiaryRS().getRegions(tableName); - int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be equal to 30", 30, totalRegions); - } finally { - disableAndDeleteTable(tableName); - } + enableReplicationByModification(false, 0, 3, 10); } @Test public void testRegionReplicasByEnableTableWhenReplicaCountIsIncreased() throws Exception { - TableName tableName = null; - try { - tableName = TableName.valueOf(name.getMethodName()); - enableReplicationByModification(tableName, true, 2, 3, 0); - List onlineRegions = getRS().getRegions(tableName); - List onlineRegions2 = getSecondaryRS().getRegions(tableName); - List onlineRegions3 = getTertiaryRS().getRegions(tableName); - int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be 3", 3, totalRegions); - } finally { - disableAndDeleteTable(tableName); - } + enableReplicationByModification(true, 2, 3, 0); } @Test public void testRegionReplicasByEnableTableWhenReplicaCountIsDecreased() throws Exception { - TableName tableName = null; - try { - tableName = TableName.valueOf(name.getMethodName()); - enableReplicationByModification(tableName, true, 3, 2, 0); - List 
onlineRegions = getRS().getRegions(tableName); - List onlineRegions2 = getSecondaryRS().getRegions(tableName); - List onlineRegions3 = getTertiaryRS().getRegions(tableName); - int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be reduced to 2", 2, totalRegions); - } finally { - disableAndDeleteTable(tableName); - } + enableReplicationByModification(true, 3, 2, 0); } @Test public void testRegionReplicasByEnableTableWhenReplicaCountIsDecreasedWithMultipleRegions() - throws Exception { - TableName tableName = null; - try { - tableName = TableName.valueOf(name.getMethodName()); - enableReplicationByModification(tableName, true, 3, 2, 20); - List onlineRegions = getRS().getRegions(tableName); - List onlineRegions2 = getSecondaryRS().getRegions(tableName); - List onlineRegions3 = getTertiaryRS().getRegions(tableName); - int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be reduced to 40", 40, totalRegions); - } finally { - disableAndDeleteTable(tableName); - } + throws Exception { + enableReplicationByModification(true, 3, 2, 20); } @Test public void testRegionReplicasByEnableTableWhenReplicaCountIsIncreasedWithmultipleRegions() - throws Exception { - TableName tableName = null; - try { - tableName = TableName.valueOf(name.getMethodName()); - enableReplicationByModification(tableName, true, 2, 3, 15); - List onlineRegions = getRS().getRegions(tableName); - List onlineRegions2 = getSecondaryRS().getRegions(tableName); - List onlineRegions3 = getTertiaryRS().getRegions(tableName); - int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be equal to 45", 3 * 15, totalRegions); - } finally { - disableAndDeleteTable(tableName); - } + throws Exception { + enableReplicationByModification(true, 2, 3, 15); } } From 0d2641ba04844c161518647baa1182300fb7d993 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Thu, 8 Oct 2020 09:10:30 -0700 Subject: [PATCH 430/769] HBASE-25165 Change 'State time' in UI so sorts (#2508) Display startcode in iso8601. 
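The fix relies on the fact that ISO-8601 strings sort lexicographically in the same order as the instants they represent, so the UI's plain string sort on the column now yields a chronological ordering; the previous java.util.Date#toString() form ("Thu Oct 08 09:10:30 PDT 2020") does not have that property. A minimal illustration (not part of the patch itself; `startcode` stands in for the value the template renders):

  long startcode = 1602173430000L; // a server start time, in epoch millis
  // Instant#toString() emits ISO-8601, e.g. "2020-10-08T16:10:30Z", so string order matches time order
  String cell = java.time.Instant.ofEpochMilli(startcode).toString();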
Signed-off-by: Nick Dimiduk --- .../apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon index a3067eeaf8ad..58783611180e 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon @@ -121,7 +121,7 @@ Arrays.sort(serverNames); <& serverNameLink; serverName=serverName; &> - <% new Date(startcode) %> + <% java.time.Instant.ofEpochMilli(startcode) %> <% TraditionalBinaryPrefix.long2String(lastContact, "s", 1) %> <% version %> <% String.format("%,.0f", requestsPerSecond) %> From a5c18505df0da9cf19e5988e9a4056ab6fb5092d Mon Sep 17 00:00:00 2001 From: Mohammad Arshad Date: Thu, 8 Oct 2020 23:00:16 +0530 Subject: [PATCH 431/769] HBASE-24025: Improve performance of move_servers_rsgroup by using async region move API (#1549) --- .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 153 ++++++++++++------ .../hbase/rsgroup/TestRSGroupsAdmin2.java | 35 ++++ 2 files changed, 135 insertions(+), 53 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index e1d9d66e4ec3..16a44ad76ccc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -33,6 +33,7 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; +import java.util.concurrent.Future; import java.util.function.Function; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; @@ -78,6 +79,7 @@ import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -956,84 +958,129 @@ private void addRegion(final LinkedList regions, RegionInfo hri) { /** * Move every region from servers which are currently located on these servers, but should not be * located there. - * @param servers the servers that will move to new group - * @param targetGroupName the target group name + * @param movedServers the servers that are moved to new group + * @param srcGrpServers all servers in the source group, excluding the movedServers + * @param targetGroup the target group * @throws IOException if moving the server and tables fail */ - private void moveServerRegionsFromGroup(Set
<Address>
    servers, String targetGroupName) - throws IOException { - moveRegionsBetweenGroups(servers, targetGroupName, rs -> getRegions(rs), info -> { - try { - String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) + private void moveServerRegionsFromGroup(Set
<Address> movedServers, Set<Address>
    srcGrpServers, + RSGroupInfo targetGroup) throws IOException { + moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroup, rs -> getRegions(rs), + info -> { + try { + String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP); - return groupName.equals(targetGroupName); - } catch (IOException e) { - LOG.warn("Failed to test group for region {} and target group {}", info, targetGroupName); - return false; - } - }, rs -> rs.getHostname()); + return groupName.equals(targetGroup.getName()); + } catch (IOException e) { + LOG.warn("Failed to test group for region {} and target group {}", info, + targetGroup.getName()); + return false; + } + }); } - private void moveRegionsBetweenGroups(Set regionsOwners, String targetGroupName, - Function> getRegionsInfo, Function validation, - Function getOwnerName) throws IOException { - boolean hasRegionsToMove; + private void moveRegionsBetweenGroups(Set regionsOwners, Set
    newRegionsOwners, + RSGroupInfo targetGrp, Function> getRegionsInfo, + Function validation) throws IOException { + // Get server names corresponding to given Addresses + List movedServerNames = new ArrayList<>(regionsOwners.size()); + List srcGrpServerNames = new ArrayList<>(newRegionsOwners.size()); + for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) { + // In case region move failed in previous attempt, regionsOwners and newRegionsOwners + // can have the same servers. So for all servers below both conditions to be checked + if (newRegionsOwners.contains(serverName.getAddress())) { + srcGrpServerNames.add(serverName); + } + if (regionsOwners.contains(serverName.getAddress())) { + movedServerNames.add(serverName); + } + } + List>> assignmentFutures = new ArrayList<>(); int retry = 0; - Set allOwners = new HashSet<>(regionsOwners); Set failedRegions = new HashSet<>(); IOException toThrow = null; do { - hasRegionsToMove = false; - for (Iterator iter = allOwners.iterator(); iter.hasNext(); ) { - T owner = iter.next(); + assignmentFutures.clear(); + failedRegions.clear(); + for (ServerName owner : movedServerNames) { // Get regions that are associated with this server and filter regions by group tables. - for (RegionInfo region : getRegionsInfo.apply(owner)) { + for (RegionInfo region : getRegionsInfo.apply((T) owner.getAddress())) { if (!validation.apply(region)) { LOG.info("Moving region {}, which do not belong to RSGroup {}", - region.getShortNameToLog(), targetGroupName); + region.getShortNameToLog(), targetGrp.getName()); + // Move region back to source RSGroup servers + ServerName dest = + masterServices.getLoadBalancer().randomAssignment(region, srcGrpServerNames); + if (dest == null) { + failedRegions.add(region.getRegionNameAsString()); + continue; + } + RegionPlan rp = new RegionPlan(region, owner, dest); try { - this.masterServices.getAssignmentManager().move(region); - failedRegions.remove(region.getRegionNameAsString()); + Future future = masterServices.getAssignmentManager().moveAsync(rp); + assignmentFutures.add(Pair.newPair(region, future)); } catch (IOException ioe) { + failedRegions.add(region.getRegionNameAsString()); LOG.debug("Move region {} from group failed, will retry, current retry time is {}", - region.getShortNameToLog(), retry, ioe); + region.getShortNameToLog(), retry, ioe); toThrow = ioe; - failedRegions.add(region.getRegionNameAsString()); } - if (masterServices.getAssignmentManager().getRegionStates(). 
- getRegionState(region).isFailedOpen()) { - continue; - } - hasRegionsToMove = true; } } - - if (!hasRegionsToMove) { - LOG.info("No more regions to move from {} to RSGroup", getOwnerName.apply(owner)); - iter.remove(); - } } - - retry++; - try { - wait(1000); - } catch (InterruptedException e) { - LOG.warn("Sleep interrupted", e); - Thread.currentThread().interrupt(); + waitForRegionMovement(assignmentFutures, failedRegions, targetGrp.getName(), retry); + if (failedRegions.isEmpty()) { + LOG.info("All regions from server(s) {} moved to target group {}.", movedServerNames, + targetGrp.getName()); + return; + } else { + try { + wait(1000); + } catch (InterruptedException e) { + LOG.warn("Sleep interrupted", e); + Thread.currentThread().interrupt(); + } + retry++; } - } while (hasRegionsToMove && retry <= - masterServices.getConfiguration().getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE)); + } while (!failedRegions.isEmpty() && retry <= masterServices.getConfiguration() .getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE)); // the loop ends when we have retried up to the max retry count, or there are no more regions to move - if (hasRegionsToMove) { + if (!failedRegions.isEmpty()) { // print the regions that failed to move, for convenient later processing String msg = String - .format("move regions for group %s failed, failed regions: %s", targetGroupName, - failedRegions); + .format("move regions for group %s failed, failed regions: %s", targetGrp.getName(), + failedRegions); LOG.error(msg); throw new DoNotRetryIOException( - msg + ", just record the last failed region's cause, more details in server log", - toThrow); + msg + ", just record the last failed region's cause, more details in server log", toThrow); + } + } + + /** + * Wait for all the region moves to complete. Keep waiting for the other region movements + * to complete even if some of them fail. + */ + private void waitForRegionMovement(List<Pair<RegionInfo, Future<byte[]>>> regionMoveFutures, + Set<String> failedRegions, String tgtGrpName, int retryCount) { + LOG.info("Moving {} region(s) to group {}, current retry={}", regionMoveFutures.size(), + tgtGrpName, retryCount); + for (Pair<RegionInfo, Future<byte[]>> pair : regionMoveFutures) { + try { + pair.getSecond().get(); + if (masterServices.getAssignmentManager().getRegionStates(). + getRegionState(pair.getFirst()).isFailedOpen()) { + failedRegions.add(pair.getFirst().getRegionNameAsString()); + } + } catch (InterruptedException e) { + // Don't return from here; let's wait for the other regions to complete their movement. + failedRegions.add(pair.getFirst().getRegionNameAsString()); + LOG.warn("Sleep interrupted", e); + } catch (Exception e) { + failedRegions.add(pair.getFirst().getRegionNameAsString()); + LOG.error("Move region {} to group {} failed, will retry on next attempt", + pair.getFirst().getShortNameToLog(), tgtGrpName, e); + } } } @@ -1185,7 +1232,7 @@ public void moveServers(Set<Address>
    servers, String targetGroupName) throws IOE if (StringUtils.isEmpty(targetGroupName)) { throw new ConstraintException("RSGroup cannot be null."); } - getRSGroupInfo(targetGroupName); + RSGroupInfo targetGroup = getRSGroupInfo(targetGroupName); // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. @@ -1230,7 +1277,7 @@ public void moveServers(Set
    servers, String targetGroupName) throws IOE // MovedServers may be < passed in 'servers'. Set
<Address> movedServers = moveServers(servers, srcGrp.getName(), targetGroupName); - moveServerRegionsFromGroup(movedServers, targetGroupName); + moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroup); LOG.info("Move servers done: {} => {}", srcGrp.getName(), targetGroupName); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java index a3a08eabe996..983414236c3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java @@ -686,4 +686,39 @@ public void testFailedMoveServersTablesAndRepair() throws Exception { assertEquals(regionsInfo.getTable(), table2); } } + + @Test + public void testMoveServersToRSGroupPerformance() throws Exception { + final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 2); + final byte[] familyNameBytes = Bytes.toBytes("f"); + // with 200 regions, there will be 100 regions on each of the two servers + final int tableRegionCount = 200; + // All the regions created below will be assigned to the default group. + TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, tableRegionCount); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() { + @Override public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) { + return false; + } + return getTableRegionMap().get(tableName).size() >= tableRegionCount; + } + }); + ADMIN.setRSGroup(Sets.newHashSet(tableName), newGroup.getName()); + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + String rsGroup2 = "rsGroup2"; + ADMIN.addRSGroup(rsGroup2); + + long startTime = System.currentTimeMillis(); + ADMIN.moveServersToRSGroup(Sets.newHashSet(newGroup.getServers().first()), rsGroup2); + long timeTaken = System.currentTimeMillis() - startTime; + String msg = + "Should not take more than 15000 ms to move a table with 100 regions. Time taken =" + + timeTaken + " ms"; + // This test case is meant for a developer to verify the performance quickly. + // Moving 100 regions takes much less than 15000 ms; the limit is set to 15000 ms so that + // the test passes in all environments. + assertTrue(msg, timeTaken < 15000); + LOG.info("Time taken to move a table with 100 regions is {} ms", timeTaken); + } } From f0de2d99ba436a7c02cda436336443faeea08bf1 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 8 Oct 2020 14:23:38 -0700 Subject: [PATCH 432/769] HBASE-25156 TestMasterFailover.testSimpleMasterFailover is flaky (#2507) Change the test to wait for evidence that the active master has seen that the backup master killed by the test has gone away. This is done before proceeding to validate that the dead backup is correctly omitted from the ClusterStatus report. Also, minor fixup to several assertions, using `assertEquals` instead of `assertTrue(...equals(...))` and correcting expected vs. actual ordering of assertion arguments.
Signed-off-by: Michael Stack --- .../hbase/master/TestMasterFailover.java | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index a27936df07a4..1e37fcb52b58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +20,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; - import java.util.List; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -100,7 +100,7 @@ public void testSimpleMasterFailover() throws Exception { // Check that ClusterStatus reports the correct active and backup masters assertNotNull(active); ClusterMetrics status = active.getClusterMetrics(); - assertTrue(status.getMasterName().equals(activeName)); + assertEquals(activeName, status.getMasterName()); assertEquals(2, status.getBackupMasterNames().size()); // attempt to stop one of the inactive masters @@ -113,7 +113,7 @@ public void testSimpleMasterFailover() throws Exception { // Verify still one active master and it's the same for (int i = 0; i < masterThreads.size(); i++) { if (masterThreads.get(i).getMaster().isActiveMaster()) { - assertTrue(activeName.equals(masterThreads.get(i).getMaster().getServerName())); + assertEquals(activeName, masterThreads.get(i).getMaster().getServerName()); activeIndex = i; active = masterThreads.get(activeIndex).getMaster(); } @@ -126,10 +126,15 @@ public void testSimpleMasterFailover() throws Exception { " regions servers"); assertEquals(3, rsCount); + // wait for the active master to acknowledge loss of the backup from ZK + final HMaster activeFinal = active; + TEST_UTIL.waitFor( + TimeUnit.SECONDS.toMillis(30), () -> activeFinal.getBackupMasters().size() == 1); + // Check that ClusterStatus reports the correct active and backup masters assertNotNull(active); status = active.getClusterMetrics(); - assertTrue(status.getMasterName().equals(activeName)); + assertEquals(activeName, status.getMasterName()); assertEquals(1, status.getBackupMasterNames().size()); // kill the active master @@ -148,13 +153,13 @@ public void testSimpleMasterFailover() throws Exception { active = masterThreads.get(0).getMaster(); assertNotNull(active); status = active.getClusterMetrics(); - ServerName mastername = status.getMasterName(); - assertTrue(mastername.equals(active.getServerName())); + ServerName masterName = status.getMasterName(); + assertNotNull(masterName); + assertEquals(active.getServerName(), masterName); assertTrue(active.isActiveMaster()); assertEquals(0, status.getBackupMasterNames().size()); int rss = status.getLiveServerMetrics().size(); - LOG.info("Active master " + mastername.getServerName() + " managing " + - rss + " region servers"); + LOG.info("Active master {} managing {} region servers", masterName.getServerName(), rss); assertEquals(3, rss); } finally { // Stop the cluster From 14c1bf3497a4cc67f9932d548b10327e40e21574 Mon Sep 
17 00:00:00 2001
From: Nick Dimiduk
Date: Wed, 23 Sep 2020 16:47:23 -0700
Subject: [PATCH 433/769] HBASE-24628 Region normalizer now respects a rate limit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Implement a rate limiter for the normalizer. It is implemented in terms of
MB/sec of affected region size (the same metric used to make normalization
decisions). Uses Guava `RateLimiter` to perform the resource accounting.
`RateLimiter` works by blocking (uninterruptibly 😖) the calling thread. Thus,
the whole construction of the normalizer subsystem needed refactoring. See the
provided `package-info.java` for an overview of this new structure.

Introduces a new configuration, `hbase.normalizer.throughput.max_bytes_per_sec`,
for specifying a limit on the throughput of actions executed by the normalizer.
Note that while this configuration value is in bytes, the minimum honored value
is `1_000_000`. Values may be configured using the human-readable suffixes
honored by `Configuration.getLongBytes`.

Signed-off-by: Viraj Jasani
Signed-off-by: Huaxiang Sun
Signed-off-by: Michael Stack
---
 .../apache/hadoop/hbase/master/HMaster.java   | 180 +++----------
 .../hbase/master/MasterRpcServices.java       |  30 ++-
 .../hadoop/hbase/master/MasterServices.java   |  27 +-
 .../master/MetricsMasterWrapperImpl.java      |   4 +-
 .../MergeTableRegionsProcedure.java           |   8 +-
 .../assignment/SplitTableRegionProcedure.java |  17 +-
 .../normalizer/MergeNormalizationPlan.java    |  72 ++---
 .../master/normalizer/NormalizationPlan.java  |  18 +-
 .../normalizer/NormalizationTarget.java       |  80 ++++++
 .../master/normalizer/RegionNormalizer.java   |  24 +-
 .../normalizer/RegionNormalizerChore.java     |  24 +-
 .../normalizer/RegionNormalizerFactory.java   |  30 ++-
 .../normalizer/RegionNormalizerManager.java   | 174 ++++++++++++
 .../normalizer/RegionNormalizerWorkQueue.java | 244 +++++++++++++++++
 .../normalizer/RegionNormalizerWorker.java    | 253 ++++++++++++++++++
 .../normalizer/SimpleRegionNormalizer.java    |  49 +---
 .../normalizer/SplitNormalizationPlan.java    |  29 +-
 .../hbase/master/normalizer/package-info.java | 100 +++++++
 .../hbase/master/MockNoopMasterServices.java  |  23 +-
 .../master/TestMasterChoreScheduled.java      |  35 +--
 .../master/TestMasterMetricsWrapper.java      |   6 +-
 .../TestRegionNormalizerWorkQueue.java        | 234 ++++++++++++++++
 .../TestRegionNormalizerWorker.java           | 252 +++++++++++++++++
 .../TestSimpleRegionNormalizer.java           |  85 ++++--
 .../TestSimpleRegionNormalizerOnCluster.java  |   7 +-
 25 files changed, 1634 insertions(+), 371 deletions(-)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index cf43c8b814c4..9c617bbe7f8e
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -29,7 +29,6 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; -import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -49,7 +48,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; import java.util.regex.Pattern; import java.util.stream.Collectors; import javax.servlet.ServletException; @@ -117,11 +115,8 @@ import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory; +import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure; import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure; @@ -202,7 +197,6 @@ import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.HBaseFsck; import org.apache.hadoop.hbase.util.HFileArchiveUtil; @@ -233,7 +227,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.Service; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server; import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector; import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder; @@ -337,9 +330,6 @@ public void run() { // Tracker for split and merge state private SplitOrMergeTracker splitOrMergeTracker; - // Tracker for region normalizer state - private RegionNormalizerTracker regionNormalizerTracker; - private ClusterSchemaService clusterSchemaService; public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = @@ -406,11 +396,8 @@ public void run() { private final LockManager lockManager = new LockManager(this); private RSGroupBasedLoadBalancer balancer; - // a lock to prevent concurrent normalization actions. 
- private final ReentrantLock normalizationInProgressLock = new ReentrantLock(); - private RegionNormalizer normalizer; private BalancerChore balancerChore; - private RegionNormalizerChore normalizerChore; + private RegionNormalizerManager regionNormalizerManager; private ClusterStatusChore clusterStatusChore; private ClusterStatusPublisher clusterStatusPublisherChore = null; private SnapshotCleanerChore snapshotCleanerChore = null; @@ -464,9 +451,6 @@ public void run() { // handle table states private TableStateManager tableStateManager; - private long splitPlanCount; - private long mergePlanCount; - /** jetty server for master to redirect requests to regionserver infoServer */ private Server masterJettyServer; @@ -788,26 +772,19 @@ public MetricsMaster getMasterMetrics() { } /** - *

    * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it
    * should have already been initialized along with {@link ServerManager}.
-   * </p>
-   * <p>
-   * Will be overridden in tests.
-   * </p>
    */ - @VisibleForTesting - protected void initializeZKBasedSystemTrackers() - throws IOException, InterruptedException, KeeperException, ReplicationException { + private void initializeZKBasedSystemTrackers() + throws IOException, KeeperException, ReplicationException { this.balancer = new RSGroupBasedLoadBalancer(); this.balancer.setConf(conf); this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this); this.loadBalancerTracker.start(); - this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf); - this.normalizer.setMasterServices(this); - this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this); - this.regionNormalizerTracker.start(); + this.regionNormalizerManager = + RegionNormalizerFactory.createNormalizerManager(conf, zooKeeper, this); + this.regionNormalizerManager.start(); this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this); this.splitOrMergeTracker.start(); @@ -900,10 +877,10 @@ protected AssignmentManager createAssignmentManager(MasterServices master) { * * *
 * <li>If this is a new deploy, schedule a InitMetaProcedure to initialize meta</li>
- * <li>Start necessary service threads - balancer, catalog janior, executor services, and also the
- * procedure executor, etc. Notice that the balancer must be created first as assignment manager
- * may use it when assigning regions.</li>
- * <li>Wait for meta to be initialized if necesssary, start table state manager.</li>
+ * <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
+ * the procedure executor, etc. Notice that the balancer must be created first as assignment
+ * manager may use it when assigning regions.</li>
+ * <li>Wait for meta to be initialized if necessary, start table state manager.</li>
 * <li>Wait for enough region servers to check-in</li>
 * <li>Let assignment manager load data from meta and construct region states</li>
 * <li>Start all other things such as chore services, etc</li>
  • @@ -1116,8 +1093,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc getChoreService().scheduleChore(clusterStatusChore); this.balancerChore = new BalancerChore(this); getChoreService().scheduleChore(balancerChore); - this.normalizerChore = new RegionNormalizerChore(this); - getChoreService().scheduleChore(normalizerChore); + getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore()); this.catalogJanitorChore = new CatalogJanitor(this); getChoreService().scheduleChore(catalogJanitorChore); this.hbckChore = new HbckChore(this); @@ -1533,6 +1509,9 @@ protected void stopServiceThreads() { // example. stopProcedureExecutor(); + if (regionNormalizerManager != null) { + regionNormalizerManager.stop(); + } if (this.quotaManager != null) { this.quotaManager.stop(); } @@ -1651,7 +1630,7 @@ private void stopChores() { choreService.cancelChore(this.mobFileCleanerChore); choreService.cancelChore(this.mobFileCompactionChore); choreService.cancelChore(this.balancerChore); - choreService.cancelChore(this.normalizerChore); + choreService.cancelChore(getRegionNormalizerManager().getRegionNormalizerChore()); choreService.cancelChore(this.clusterStatusChore); choreService.cancelChore(this.catalogJanitorChore); choreService.cancelChore(this.clusterStatusPublisherChore); @@ -1751,7 +1730,9 @@ public boolean balance() throws IOException { * @param action the name of the action under consideration, for logging. * @return {@code true} when the caller should exit early, {@code false} otherwise. */ - private boolean skipRegionManagementAction(final String action) { + @Override + public boolean skipRegionManagementAction(final String action) { + // Note: this method could be `default` on MasterServices if but for logging. if (!isInitialized()) { LOG.debug("Master has not been initialized, don't run {}.", action); return true; @@ -1896,24 +1877,16 @@ public List executeRegionPlansWithThrottling(List plans) } @Override - public RegionNormalizer getRegionNormalizer() { - return this.normalizer; + public RegionNormalizerManager getRegionNormalizerManager() { + return regionNormalizerManager; } - public boolean normalizeRegions() throws IOException { - return normalizeRegions(new NormalizeTableFilterParams.Builder().build()); - } - - /** - * Perform normalization of cluster. - * - * @return true if an existing normalization was already in progress, or if a new normalization - * was performed successfully; false otherwise (specifically, if HMaster finished initializing - * or normalization is globally disabled). - */ - public boolean normalizeRegions(final NormalizeTableFilterParams ntfp) throws IOException { - final long startTime = EnvironmentEdgeManager.currentTime(); - if (regionNormalizerTracker == null || !regionNormalizerTracker.isNormalizerOn()) { + @Override + public boolean normalizeRegions( + final NormalizeTableFilterParams ntfp, + final boolean isHighPriority + ) throws IOException { + if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) { LOG.debug("Region normalization is disabled, don't run region normalizer."); return false; } @@ -1924,70 +1897,17 @@ public boolean normalizeRegions(final NormalizeTableFilterParams ntfp) throws IO return false; } - if (!normalizationInProgressLock.tryLock()) { - // Don't run the normalizer concurrently - LOG.info("Normalization already in progress. 
Skipping request."); - return true; - } - - int affectedTables = 0; - try { - final Set matchingTables = getTableDescriptors(new LinkedList<>(), - ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) - .stream() - .map(TableDescriptor::getTableName) - .collect(Collectors.toSet()); - final Set allEnabledTables = - tableStateManager.getTablesInStates(TableState.State.ENABLED); - final List targetTables = - new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); - Collections.shuffle(targetTables); - - final List submittedPlanProcIds = new ArrayList<>(); - for (TableName table : targetTables) { - if (table.isSystemTable()) { - continue; - } - final TableDescriptor tblDesc = getTableDescriptors().get(table); - if (tblDesc != null && !tblDesc.isNormalizationEnabled()) { - LOG.debug( - "Skipping table {} because normalization is disabled in its table properties.", table); - continue; - } - - // make one last check that the cluster isn't shutting down before proceeding. - if (skipRegionManagementAction("region normalizer")) { - return false; - } - - final List plans = normalizer.computePlansForTable(table); - if (CollectionUtils.isEmpty(plans)) { - LOG.debug("No normalization required for table {}.", table); - continue; - } - - affectedTables++; - // as of this writing, `plan.submit()` is non-blocking and uses Async Admin APIs to - // submit task , so there's no artificial rate- - // limiting of merge/split requests due to this serial loop. - for (NormalizationPlan plan : plans) { - long procId = plan.submit(this); - submittedPlanProcIds.add(procId); - if (plan.getType() == PlanType.SPLIT) { - splitPlanCount++; - } else if (plan.getType() == PlanType.MERGE) { - mergePlanCount++; - } - } - } - final long endTime = EnvironmentEdgeManager.currentTime(); - LOG.info("Normalizer ran successfully in {}. 
Submitted {} plans, affecting {} tables.", - Duration.ofMillis(endTime - startTime), submittedPlanProcIds.size(), affectedTables); - LOG.debug("Normalizer submitted procID list: {}", submittedPlanProcIds); - } finally { - normalizationInProgressLock.unlock(); - } - return true; + final Set matchingTables = getTableDescriptors(new LinkedList<>(), + ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) + .stream() + .map(TableDescriptor::getTableName) + .collect(Collectors.toSet()); + final Set allEnabledTables = + tableStateManager.getTablesInStates(TableState.State.ENABLED); + final List targetTables = + new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); + Collections.shuffle(targetTables); + return regionNormalizerManager.normalizeRegions(targetTables, isHighPriority); } /** @@ -3003,20 +2923,6 @@ public double getAverageLoad() { return regionStates.getAverageLoad(); } - /* - * @return the count of region split plans executed - */ - public long getSplitPlanCount() { - return splitPlanCount; - } - - /* - * @return the count of region merge plans executed - */ - public long getMergePlanCount() { - return mergePlanCount; - } - @Override public boolean registerService(Service instance) { /* @@ -3511,8 +3417,7 @@ public boolean isBalancerOn() { */ public boolean isNormalizerOn() { return !isInMaintenanceMode() - && regionNormalizerTracker != null - && regionNormalizerTracker.isNormalizerOn(); + && getRegionNormalizerManager().isNormalizerOn(); } /** @@ -3540,13 +3445,6 @@ public String getLoadBalancerClassName() { LoadBalancerFactory.getDefaultLoadBalancerClass().getName()); } - /** - * @return RegionNormalizerTracker instance - */ - public RegionNormalizerTracker getRegionNormalizerTracker() { - return regionNormalizerTracker; - } - public SplitOrMergeTracker getSplitOrMergeTracker() { return splitOrMergeTracker; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 37fc58985e7b..d4dbc8d55dcd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1913,9 +1913,7 @@ public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController contr master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType); } } - } catch (IOException e) { - throw new ServiceException(e); - } catch (KeeperException e) { + } catch (IOException | KeeperException e) { throw new ServiceException(e); } return response.build(); @@ -1940,7 +1938,8 @@ public NormalizeResponse normalize(RpcController controller, .namespace(request.hasNamespace() ? request.getNamespace() : null) .build(); return NormalizeResponse.newBuilder() - .setNormalizerRan(master.normalizeRegions(ntfp)) + // all API requests are considered priority requests. + .setNormalizerRan(master.normalizeRegions(ntfp, true)) .build(); } catch (IOException ex) { throw new ServiceException(ex); @@ -1953,20 +1952,27 @@ public SetNormalizerRunningResponse setNormalizerRunning(RpcController controlle rpcPreCheck("setNormalizerRunning"); // Sets normalizer on/off flag in ZK. 
- boolean prevValue = master.getRegionNormalizerTracker().isNormalizerOn(); - boolean newValue = request.getOn(); - try { - master.getRegionNormalizerTracker().setNormalizerOn(newValue); - } catch (KeeperException ke) { - LOG.warn("Error flipping normalizer switch", ke); - } + // TODO: this method is totally broken in terms of atomicity of actions and values read. + // 1. The contract has this RPC returning the previous value. There isn't a ZKUtil method + // that lets us retrieve the previous value as part of setting a new value, so we simply + // perform a read before issuing the update. Thus we have a data race opportunity, between + // when the `prevValue` is read and whatever is actually overwritten. + // 2. Down in `setNormalizerOn`, the call to `createAndWatch` inside of the catch clause can + // itself fail in the event that the znode already exists. Thus, another data race, between + // when the initial `setData` call is notified of the absence of the target znode and the + // subsequent `createAndWatch`, with another client creating said node. + // That said, there's supposed to be only one active master and thus there's supposed to be + // only one process with the authority to modify the value. + final boolean prevValue = master.getRegionNormalizerManager().isNormalizerOn(); + final boolean newValue = request.getOn(); + master.getRegionNormalizerManager().setNormalizerOn(newValue); LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue); return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build(); } @Override public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, - IsNormalizerEnabledRequest request) throws ServiceException { + IsNormalizerEnabledRequest request) { IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder(); response.setEnabled(master.isNormalizerOn()); return response.build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 908d21270c6e..384785d738f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -34,7 +35,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; +import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; @@ -54,7 +55,6 @@ import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; import org.apache.yetus.audience.InterfaceAudience; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.protobuf.Service; @@ -122,9 +122,9 @@ public interface MasterServices extends Server { MasterQuotaManager getMasterQuotaManager(); /** - * @return Master's instance of {@link RegionNormalizer} + * @return Master's instance of {@link RegionNormalizerManager} */ - RegionNormalizer getRegionNormalizer(); + RegionNormalizerManager getRegionNormalizerManager(); /** * @return Master's instance of {@link CatalogJanitor} @@ -354,6 +354,13 @@ long splitRegion( */ boolean isInMaintenanceMode(); + /** + * Checks master state before initiating action over region topology. + * @param action the name of the action under consideration, for logging. + * @return {@code true} when the caller should exit early, {@code false} otherwise. + */ + boolean skipRegionManagementAction(final String action); + /** * Abort a procedure. * @param procId ID of the procedure @@ -553,4 +560,14 @@ default SplitWALManager getSplitWALManager(){ * @return The state of the load balancer, or false if the load balancer isn't defined. */ boolean isBalancerOn(); + + /** + * Perform normalization of cluster. + * @param ntfp Selection criteria for identifying which tables to normalize. + * @param isHighPriority {@code true} when these requested tables should skip to the front of + * the queue. + * @return {@code true} when the request was submitted, {@code false} otherwise. 
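+   *         For illustration (the namespace name is hypothetical), a high-priority request
+   *         scoped to one namespace could be submitted as {@code normalizeRegions(
+   *         new NormalizeTableFilterParams.Builder().namespace("ns1").build(), true)}.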
+ */ + boolean normalizeRegions( + final NormalizeTableFilterParams ntfp, final boolean isHighPriority) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java index 9d4550c5eb0a..aeaae929209e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java @@ -55,12 +55,12 @@ public double getAverageLoad() { @Override public long getSplitPlanCount() { - return master.getSplitPlanCount(); + return master.getRegionNormalizerManager().getSplitPlanCount(); } @Override public long getMergePlanCount() { - return master.getMergePlanCount(); + return master.getRegionNormalizerManager().getMergePlanCount(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index f1b3329b25c0..5e06a44912b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -59,9 +59,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -534,8 +532,10 @@ private void preMergeRegions(final MasterProcedureEnv env) throws IOException { try { env.getMasterServices().getMasterQuotaManager().onRegionMerged(this.mergedRegion); } catch (QuotaExceededException e) { - env.getMasterServices().getRegionNormalizer().planSkipped(this.mergedRegion, - NormalizationPlan.PlanType.MERGE); + // TODO: why is this here? merge requests can be submitted by actors other than the normalizer + env.getMasterServices() + .getRegionNormalizerManager() + .planSkipped(NormalizationPlan.PlanType.MERGE); throw e; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index d0413360e6df..0eb7667d7da3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -71,13 +71,11 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -181,9 +179,10 @@ private boolean hasBestSplitRow() { private void checkSplittable(final MasterProcedureEnv env, final RegionInfo regionToSplit, final byte[] splitRow) throws IOException { // Ask the remote RS if this region is splittable. - // If we get an IOE, report it along w/ the failure so can see why we are not splittable at this time. + // If we get an IOE, report it along w/ the failure so can see why we are not splittable at + // this time. if(regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - throw new IllegalArgumentException ("Can't invoke split on non-default regions directly"); + throw new IllegalArgumentException("Can't invoke split on non-default regions directly"); } RegionStateNode node = env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion()); @@ -570,8 +569,10 @@ private void preSplitRegion(final MasterProcedureEnv env) try { env.getMasterServices().getMasterQuotaManager().onRegionSplit(this.getParentRegion()); } catch (QuotaExceededException e) { - env.getMasterServices().getRegionNormalizer().planSkipped(this.getParentRegion(), - NormalizationPlan.PlanType.SPLIT); + // TODO: why is this here? split requests can be submitted by actors other than the normalizer + env.getMasterServices() + .getRegionNormalizerManager() + .planSkipped(NormalizationPlan.PlanType.SPLIT); throw e; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java index 17e313047d72..677b9ec8052e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java @@ -18,41 +18,35 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import java.io.IOException; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Normalization plan to merge regions (smallest region in the table with its smallest neighbor). + * Normalization plan to merge adjacent regions. 
As with any call to + * {@link MasterServices#mergeRegions(RegionInfo[], boolean, long, long)} + * with {@code forcible=false}, Region order and adjacency are important. It's the caller's + * responsibility to ensure the provided parameters are ordered according to the + * {code mergeRegions} method requirements. */ @InterfaceAudience.Private -public class MergeNormalizationPlan implements NormalizationPlan { +final class MergeNormalizationPlan implements NormalizationPlan { - private final RegionInfo firstRegion; - private final RegionInfo secondRegion; + private final List normalizationTargets; - public MergeNormalizationPlan(RegionInfo firstRegion, RegionInfo secondRegion) { - this.firstRegion = firstRegion; - this.secondRegion = secondRegion; - } - - /** - * {@inheritDoc} - */ - @Override - public long submit(MasterServices masterServices) throws IOException { - // Do not use force=true as corner cases can happen, non adjacent regions, - // merge with a merged child region with no GC done yet, it is going to - // cause all different issues. - return masterServices - .mergeRegions(new RegionInfo[] { firstRegion, secondRegion }, false, HConstants.NO_NONCE, - HConstants.NO_NONCE); + private MergeNormalizationPlan(List normalizationTargets) { + Preconditions.checkNotNull(normalizationTargets); + Preconditions.checkState(normalizationTargets.size() >= 2, + "normalizationTargets.size() must be >= 2 but was %s", normalizationTargets.size()); + this.normalizationTargets = Collections.unmodifiableList(normalizationTargets); } @Override @@ -60,19 +54,14 @@ public PlanType getType() { return PlanType.MERGE; } - RegionInfo getFirstRegion() { - return firstRegion; - } - - RegionInfo getSecondRegion() { - return secondRegion; + public List getNormalizationTargets() { + return normalizationTargets; } @Override public String toString() { return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("firstRegion", firstRegion) - .append("secondRegion", secondRegion) + .append("normalizationTargets", normalizationTargets) .toString(); } @@ -89,16 +78,31 @@ public boolean equals(Object o) { MergeNormalizationPlan that = (MergeNormalizationPlan) o; return new EqualsBuilder() - .append(firstRegion, that.firstRegion) - .append(secondRegion, that.secondRegion) + .append(normalizationTargets, that.normalizationTargets) .isEquals(); } @Override public int hashCode() { return new HashCodeBuilder(17, 37) - .append(firstRegion) - .append(secondRegion) + .append(normalizationTargets) .toHashCode(); } + + /** + * A helper for constructing instances of {@link MergeNormalizationPlan}. 
+ */ + static class Builder { + + private final List normalizationTargets = new LinkedList<>(); + + public Builder addTarget(final RegionInfo regionInfo, final long regionSizeMb) { + normalizationTargets.add(new NormalizationTarget(regionInfo, regionSizeMb)); + return this; + } + + public MergeNormalizationPlan build() { + return new MergeNormalizationPlan(normalizationTargets); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java index cd13f69e764e..3bfae14e0b7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +17,12 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; /** - * Interface for normalization plan. + * A {@link NormalizationPlan} describes some modification to region split points as identified + * by an instance of {@link RegionNormalizer}. It is a POJO describing what action needs taken + * and the regions it targets. */ @InterfaceAudience.Private public interface NormalizationPlan { @@ -33,15 +32,6 @@ enum PlanType { NONE } - /** - * Submits normalization plan on cluster (does actual splitting/merging work) and - * returns proc Id to caller. - * @param masterServices instance of {@link MasterServices} - * @return Proc Id for the submitted task - * @throws IOException If plan submission to Admin fails - */ - long submit(MasterServices masterServices) throws IOException; - /** * @return the type of this plan */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java new file mode 100644 index 000000000000..9e4b3f426403 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.normalizer; + +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A POJO that caries details about a region selected for normalization through the pipeline. + */ +@InterfaceAudience.Private +class NormalizationTarget { + private final RegionInfo regionInfo; + private final long regionSizeMb; + + NormalizationTarget(final RegionInfo regionInfo, final long regionSizeMb) { + this.regionInfo = regionInfo; + this.regionSizeMb = regionSizeMb; + } + + public RegionInfo getRegionInfo() { + return regionInfo; + } + + public long getRegionSizeMb() { + return regionSizeMb; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + NormalizationTarget that = (NormalizationTarget) o; + + return new EqualsBuilder() + .append(regionSizeMb, that.regionSizeMb) + .append(regionInfo, that.regionInfo) + .isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(17, 37) + .append(regionInfo) + .append(regionSizeMb) + .toHashCode(); + } + + @Override public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) + .append("regionInfo", regionInfo) + .append("regionSizeMb", regionSizeMb) + .toString(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java index 672171d1caff..6f939daeda92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java @@ -20,13 +20,9 @@ import java.util.List; import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; /** * Performs "normalization" of regions of a table, making sure that suboptimal @@ -39,8 +35,7 @@ * "split/merge storms". */ @InterfaceAudience.Private -@InterfaceStability.Evolving -public interface RegionNormalizer extends Configurable { +interface RegionNormalizer extends Configurable { /** * Set the master service. Must be called before first call to * {@link #computePlansForTable(TableName)}. @@ -55,20 +50,5 @@ public interface RegionNormalizer extends Configurable { * @return A list of the normalization actions to perform, or an empty list * if there's nothing to do. 
*/ - List computePlansForTable(TableName table) - throws HBaseIOException; - - /** - * Notification for the case where plan couldn't be executed due to constraint violation, such as - * namespace quota - * @param hri the region which is involved in the plan - * @param type type of plan - */ - void planSkipped(RegionInfo hri, PlanType type); - - /** - * @param type type of plan for which skipped count is to be returned - * @return the count of plans of specified type which were skipped - */ - long getSkippedCount(NormalizationPlan.PlanType type); + List computePlansForTable(TableName table); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java index 19d2dc7a3ba9..d56acc2a935e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,34 +17,35 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import java.io.IOException; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.master.HMaster; - -import java.io.IOException; /** - * Chore that will call {@link org.apache.hadoop.hbase.master.HMaster#normalizeRegions()} - * when needed. + * Chore that will periodically call + * {@link HMaster#normalizeRegions(NormalizeTableFilterParams, boolean)}. */ @InterfaceAudience.Private -public class RegionNormalizerChore extends ScheduledChore { +class RegionNormalizerChore extends ScheduledChore { private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerChore.class); - private final HMaster master; + private final MasterServices master; - public RegionNormalizerChore(HMaster master) { + public RegionNormalizerChore(MasterServices master) { super(master.getServerName() + "-RegionNormalizerChore", master, - master.getConfiguration().getInt("hbase.normalizer.period", 300000)); + master.getConfiguration().getInt("hbase.normalizer.period", 300_000)); this.master = master; } @Override protected void chore() { try { - master.normalizeRegions(); + master.normalizeRegions(new NormalizeTableFilterParams.Builder().build(), false); } catch (IOException e) { LOG.error("Failed to normalize regions.", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java index 06774c97a81e..92d16648fcd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Factory to create instance of {@link RegionNormalizer} as configured. @@ -32,13 +35,30 @@ public final class RegionNormalizerFactory { private RegionNormalizerFactory() { } + public static RegionNormalizerManager createNormalizerManager( + final Configuration conf, + final ZKWatcher zkWatcher, + final HMaster master // TODO: consolidate this down to MasterServices + ) { + final RegionNormalizer regionNormalizer = getRegionNormalizer(conf); + regionNormalizer.setMasterServices(master); + final RegionNormalizerTracker tracker = new RegionNormalizerTracker(zkWatcher, master); + final RegionNormalizerChore chore = + master.isInMaintenanceMode() ? null : new RegionNormalizerChore(master); + final RegionNormalizerWorkQueue workQueue = + master.isInMaintenanceMode() ? null : new RegionNormalizerWorkQueue<>(); + final RegionNormalizerWorker worker = master.isInMaintenanceMode() + ? null + : new RegionNormalizerWorker(conf, master, regionNormalizer, workQueue); + return new RegionNormalizerManager(tracker, chore, workQueue, worker); + } + /** * Create a region normalizer from the given conf. * @param conf configuration * @return {@link RegionNormalizer} implementation */ - public static RegionNormalizer getRegionNormalizer(Configuration conf) { - + private static RegionNormalizer getRegionNormalizer(Configuration conf) { // Create instance of Region Normalizer Class balancerKlass = conf.getClass(HConstants.HBASE_MASTER_NORMALIZER_CLASS, SimpleRegionNormalizer.class, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java new file mode 100644 index 000000000000..e818519d6513 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.normalizer; + +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * This class encapsulates the details of the {@link RegionNormalizer} subsystem. + */ +@InterfaceAudience.Private +public class RegionNormalizerManager { + private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerManager.class); + + private final RegionNormalizerTracker regionNormalizerTracker; + private final RegionNormalizerChore regionNormalizerChore; + private final RegionNormalizerWorkQueue workQueue; + private final RegionNormalizerWorker worker; + private final ExecutorService pool; + + private final Object startStopLock = new Object(); + private boolean started = false; + private boolean stopped = false; + + public RegionNormalizerManager( + @NonNull final RegionNormalizerTracker regionNormalizerTracker, + @Nullable final RegionNormalizerChore regionNormalizerChore, + @Nullable final RegionNormalizerWorkQueue workQueue, + @Nullable final RegionNormalizerWorker worker + ) { + this.regionNormalizerTracker = regionNormalizerTracker; + this.regionNormalizerChore = regionNormalizerChore; + this.workQueue = workQueue; + this.worker = worker; + this.pool = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("normalizer-worker-%d") + .setUncaughtExceptionHandler( + (thread, throwable) -> + LOG.error("Uncaught exception, worker thread likely terminated.", throwable)) + .build()); + } + + public void start() { + synchronized (startStopLock) { + if (started) { + return; + } + regionNormalizerTracker.start(); + if (worker != null) { + // worker will be null when master is in maintenance mode. + pool.submit(worker); + } + started = true; + } + } + + public void stop() { + synchronized (startStopLock) { + if (!started) { + throw new IllegalStateException("calling `stop` without first calling `start`."); + } + if (stopped) { + return; + } + pool.shutdownNow(); // shutdownNow to interrupt the worker thread sitting on `take()` + regionNormalizerTracker.stop(); + stopped = true; + } + } + + public ScheduledChore getRegionNormalizerChore() { + return regionNormalizerChore; + } + + /** + * Return {@code true} if region normalizer is on, {@code false} otherwise + */ + public boolean isNormalizerOn() { + return regionNormalizerTracker.isNormalizerOn(); + } + + /** + * Set region normalizer on/off + * @param normalizerOn whether normalizer should be on or off + */ + public void setNormalizerOn(boolean normalizerOn) { + try { + regionNormalizerTracker.setNormalizerOn(normalizerOn); + } catch (KeeperException e) { + LOG.warn("Error flipping normalizer switch", e); + } + } + + /** + * Call-back for the case where plan couldn't be executed due to constraint violation, + * such as namespace quota. + * @param type type of plan that was skipped. + */ + public void planSkipped(NormalizationPlan.PlanType type) { + // TODO: this appears to be used only for testing. 
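+    // Note: worker is null when the master runs in maintenance mode (see
+    // RegionNormalizerFactory#createNormalizerManager), so the skip is not recorded there.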
+ if (worker != null) { + worker.planSkipped(type); + } + } + + /** + * Retrieve a count of the number of times plans of type {@code type} were submitted but skipped. + * @param type type of plan for which skipped count is to be returned + */ + public long getSkippedCount(NormalizationPlan.PlanType type) { + // TODO: this appears to be used only for testing. + return worker == null ? 0 : worker.getSkippedCount(type); + } + + /** + * Return the number of times a {@link SplitNormalizationPlan} has been submitted. + */ + public long getSplitPlanCount() { + return worker == null ? 0 : worker.getSplitPlanCount(); + } + + /** + * Return the number of times a {@link MergeNormalizationPlan} has been submitted. + */ + public long getMergePlanCount() { + return worker == null ? 0 : worker.getMergePlanCount(); + } + + /** + * Submit tables for normalization. + * @param tables a list of tables to submit. + * @param isHighPriority {@code true} when these requested tables should skip to the front of + * the queue. + * @return {@code true} when work was queued, {@code false} otherwise. + */ + public boolean normalizeRegions(List tables, boolean isHighPriority) { + if (workQueue == null) { + return false; + } + if (isHighPriority) { + workQueue.putAllFirst(tables); + } else { + workQueue.putAll(tables); + } + return true; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java new file mode 100644 index 000000000000..5ebb4f9ad08d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorkQueue.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A specialized collection that holds pending work for the {@link RegionNormalizerWorker}. It is + * an ordered collection class that has the following properties: + *
<ul>
+ *   <li>Guarantees uniqueness of elements, as a {@link Set}.</li>
+ *   <li>Consumers retrieve objects from the head, as a {@link Queue}, via {@link #take()}.</li>
+ *   <li>Work is retrieved on a FIFO policy.</li>
+ *   <li>Work retrieval blocks the calling thread until new work is available, as a
+ *     {@link BlockingQueue}.</li>
+ *   <li>Allows a producer to insert an item at the head of the queue, if desired.</li>
+ * </ul>
    + * Assumes low-frequency and low-parallelism concurrent access, so protects state using a + * simplistic synchronization strategy. + */ +@InterfaceAudience.Private +class RegionNormalizerWorkQueue { + + /** Underlying storage structure that gives us the Set behavior and FIFO retrieval policy. */ + private LinkedHashSet delegate; + + // the locking structure used here follows the example found in LinkedBlockingQueue. The + // difference is that our locks guard access to `delegate` rather than the head node. + + /** Lock held by take, poll, etc */ + private final ReentrantLock takeLock; + + /** Wait queue for waiting takes */ + private final Condition notEmpty; + + /** Lock held by put, offer, etc */ + private final ReentrantLock putLock; + + RegionNormalizerWorkQueue() { + delegate = new LinkedHashSet<>(); + takeLock = new ReentrantLock(); + notEmpty = takeLock.newCondition(); + putLock = new ReentrantLock(); + } + + /** + * Signals a waiting take. Called only from put/offer (which do not + * otherwise ordinarily lock takeLock.) + */ + private void signalNotEmpty() { + final ReentrantLock takeLock = this.takeLock; + takeLock.lock(); + try { + notEmpty.signal(); + } finally { + takeLock.unlock(); + } + } + + /** + * Locks to prevent both puts and takes. + */ + private void fullyLock() { + putLock.lock(); + takeLock.lock(); + } + + /** + * Unlocks to allow both puts and takes. + */ + private void fullyUnlock() { + takeLock.unlock(); + putLock.unlock(); + } + + /** + * Inserts the specified element at the tail of the queue, if it's not already present. + * + * @param e the element to add + */ + public void put(E e) { + if (e == null) { + throw new NullPointerException(); + } + + putLock.lock(); + try { + delegate.add(e); + } finally { + putLock.unlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Inserts the specified element at the head of the queue. + * + * @param e the element to add + */ + public void putFirst(E e) { + if (e == null) { + throw new NullPointerException(); + } + putAllFirst(Collections.singleton(e)); + } + + /** + * Inserts the specified elements at the tail of the queue. Any elements already present in + * the queue are ignored. + * + * @param c the elements to add + */ + public void putAll(Collection c) { + if (c == null) { + throw new NullPointerException(); + } + + putLock.lock(); + try { + delegate.addAll(c); + } finally { + putLock.unlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Inserts the specified elements at the head of the queue. + * + * @param c the elements to add + */ + public void putAllFirst(Collection c) { + if (c == null) { + throw new NullPointerException(); + } + + fullyLock(); + try { + final LinkedHashSet copy = new LinkedHashSet<>(c.size() + delegate.size()); + copy.addAll(c); + copy.addAll(delegate); + delegate = copy; + } finally { + fullyUnlock(); + } + + if (!delegate.isEmpty()) { + signalNotEmpty(); + } + } + + /** + * Retrieves and removes the head of this queue, waiting if necessary + * until an element becomes available. 
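+   * A typical consumer loop is simply {@code while (running) { process(queue.take()); }}.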
+ * + * @return the head of this queue + * @throws InterruptedException if interrupted while waiting + */ + public E take() throws InterruptedException { + E x; + takeLock.lockInterruptibly(); + try { + while (delegate.isEmpty()) { + notEmpty.await(); + } + final Iterator iter = delegate.iterator(); + x = iter.next(); + iter.remove(); + if (!delegate.isEmpty()) { + notEmpty.signal(); + } + } finally { + takeLock.unlock(); + } + return x; + } + + /** + * Atomically removes all of the elements from this queue. + * The queue will be empty after this call returns. + */ + public void clear() { + putLock.lock(); + try { + delegate.clear(); + } finally { + putLock.unlock(); + } + } + + /** + * Returns the number of elements in this queue. + * + * @return the number of elements in this queue + */ + public int size() { + takeLock.lock(); + try { + return delegate.size(); + } finally { + takeLock.unlock(); + } + } + + @Override + public String toString() { + takeLock.lock(); + try { + return delegate.toString(); + } finally { + takeLock.unlock(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java new file mode 100644 index 000000000000..30f9fc25364d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import java.io.IOException; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.RateLimiter; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; + +/** + * Consumes normalization request targets ({@link TableName}s) off the + * {@link RegionNormalizerWorkQueue}, dispatches them to the {@link RegionNormalizer}, + * and executes the resulting {@link NormalizationPlan}s. 
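+ * <p>
+ * A rough wiring sketch (illustrative only; {@code conf}, {@code master}, and {@code normalizer}
+ * are assumed to be supplied by the caller):
+ * <pre>{@code
+ * RegionNormalizerWorkQueue<TableName> queue = new RegionNormalizerWorkQueue<>();
+ * RegionNormalizerWorker worker = new RegionNormalizerWorker(conf, master, normalizer, queue);
+ * Thread workerThread = new Thread(worker, "normalizer-worker");
+ * workerThread.setDaemon(true);
+ * workerThread.start();
+ * queue.put(TableName.valueOf("example_table")); // the worker consumes it via take()
+ * }</pre>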
+ */ +@InterfaceAudience.Private +class RegionNormalizerWorker implements Runnable { + private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerWorker.class); + + static final String RATE_LIMIT_BYTES_PER_SEC_KEY = + "hbase.normalizer.throughput.max_bytes_per_sec"; + private static final long RATE_UNLIMITED_BYTES = 1_000_000_000_000L; // 1TB/sec + + private final MasterServices masterServices; + private final RegionNormalizer regionNormalizer; + private final RegionNormalizerWorkQueue workQueue; + private final RateLimiter rateLimiter; + + private final long[] skippedCount; + private long splitPlanCount; + private long mergePlanCount; + + RegionNormalizerWorker( + final Configuration configuration, + final MasterServices masterServices, + final RegionNormalizer regionNormalizer, + final RegionNormalizerWorkQueue workQueue + ) { + this.masterServices = masterServices; + this.regionNormalizer = regionNormalizer; + this.workQueue = workQueue; + this.skippedCount = new long[NormalizationPlan.PlanType.values().length]; + this.splitPlanCount = 0; + this.mergePlanCount = 0; + this.rateLimiter = loadRateLimiter(configuration); + } + + private static RateLimiter loadRateLimiter(final Configuration configuration) { + long rateLimitBytes = + configuration.getLongBytes(RATE_LIMIT_BYTES_PER_SEC_KEY, RATE_UNLIMITED_BYTES); + long rateLimitMbs = rateLimitBytes / 1_000_000L; + if (rateLimitMbs <= 0) { + LOG.warn("Configured value {}={} is <= 1MB. Falling back to default.", + RATE_LIMIT_BYTES_PER_SEC_KEY, rateLimitBytes); + rateLimitBytes = RATE_UNLIMITED_BYTES; + rateLimitMbs = RATE_UNLIMITED_BYTES / 1_000_000L; + } + LOG.info("Normalizer rate limit set to {}", + rateLimitBytes == RATE_UNLIMITED_BYTES ? "unlimited" : rateLimitMbs + " MB/sec"); + return RateLimiter.create(rateLimitMbs); + } + + /** + * @see RegionNormalizerManager#planSkipped(NormalizationPlan.PlanType) + */ + void planSkipped(NormalizationPlan.PlanType type) { + synchronized (skippedCount) { + // updates come here via procedure threads, so synchronize access to this counter. + skippedCount[type.ordinal()]++; + } + } + + /** + * @see RegionNormalizerManager#getSkippedCount(NormalizationPlan.PlanType) + */ + long getSkippedCount(NormalizationPlan.PlanType type) { + return skippedCount[type.ordinal()]; + } + + /** + * @see RegionNormalizerManager#getSplitPlanCount() + */ + long getSplitPlanCount() { + return splitPlanCount; + } + + /** + * @see RegionNormalizerManager#getMergePlanCount() + */ + long getMergePlanCount() { + return mergePlanCount; + } + + @Override + public void run() { + while (true) { + if (Thread.interrupted()) { + LOG.debug("interrupt detected. terminating."); + break; + } + final TableName tableName; + try { + tableName = workQueue.take(); + } catch (InterruptedException e) { + LOG.debug("interrupt detected. 
terminating."); + break; + } + + final List plans = calculatePlans(tableName); + submitPlans(plans); + } + } + + private List calculatePlans(final TableName tableName) { + if (masterServices.skipRegionManagementAction("region normalizer")) { + return Collections.emptyList(); + } + + try { + final TableDescriptor tblDesc = masterServices.getTableDescriptors().get(tableName); + if (tblDesc != null && !tblDesc.isNormalizationEnabled()) { + LOG.debug("Skipping table {} because normalization is disabled in its table properties.", + tableName); + return Collections.emptyList(); + } + } catch (IOException e) { + LOG.debug("Skipping table {} because unable to access its table descriptor.", tableName, e); + return Collections.emptyList(); + } + + final List plans = regionNormalizer.computePlansForTable(tableName); + if (CollectionUtils.isEmpty(plans)) { + LOG.debug("No normalization required for table {}.", tableName); + return Collections.emptyList(); + } + return plans; + } + + private void submitPlans(final List plans) { + // as of this writing, `plan.submit()` is non-blocking and uses Async Admin APIs to submit + // task, so there's no artificial rate-limiting of merge/split requests due to this serial loop. + for (NormalizationPlan plan : plans) { + switch (plan.getType()) { + case MERGE: { + submitMergePlan((MergeNormalizationPlan) plan); + break; + } + case SPLIT: { + submitSplitPlan((SplitNormalizationPlan) plan); + break; + } + case NONE: + LOG.debug("Nothing to do for {} with PlanType=NONE. Ignoring.", plan); + planSkipped(plan.getType()); + break; + default: + LOG.warn("Plan {} is of an unrecognized PlanType. Ignoring.", plan); + planSkipped(plan.getType()); + break; + } + } + } + + /** + * Interacts with {@link MasterServices} in order to execute a plan. + */ + private void submitMergePlan(final MergeNormalizationPlan plan) { + final int totalSizeMb; + try { + final long totalSizeMbLong = plan.getNormalizationTargets() + .stream() + .mapToLong(NormalizationTarget::getRegionSizeMb) + .reduce(0, Math::addExact); + totalSizeMb = Math.toIntExact(totalSizeMbLong); + } catch (ArithmeticException e) { + LOG.debug("Sum of merge request size overflows rate limiter data type. {}", plan); + planSkipped(plan.getType()); + return; + } + + final RegionInfo[] infos = plan.getNormalizationTargets() + .stream() + .map(NormalizationTarget::getRegionInfo) + .toArray(RegionInfo[]::new); + final long pid; + try { + pid = masterServices.mergeRegions( + infos, false, HConstants.NO_NONCE, HConstants.NO_NONCE); + } catch (IOException e) { + LOG.info("failed to submit plan {}.", plan, e); + planSkipped(plan.getType()); + return; + } + mergePlanCount++; + LOG.info("Submitted {} resulting in pid {}", plan, pid); + final long rateLimitedSecs = Math.round(rateLimiter.acquire(Math.max(1, totalSizeMb))); + LOG.debug("Rate limiting delayed the worker by {}", Duration.ofSeconds(rateLimitedSecs)); + } + + /** + * Interacts with {@link MasterServices} in order to execute a plan. + */ + private void submitSplitPlan(final SplitNormalizationPlan plan) { + final int totalSizeMb; + try { + totalSizeMb = Math.toIntExact(plan.getSplitTarget().getRegionSizeMb()); + } catch (ArithmeticException e) { + LOG.debug("Split request size overflows rate limiter data type. 
{}", plan); + planSkipped(plan.getType()); + return; + } + final RegionInfo info = plan.getSplitTarget().getRegionInfo(); + final long rateLimitedSecs = Math.round(rateLimiter.acquire(Math.max(1, totalSizeMb))); + LOG.debug("Rate limiting delayed this operation by {}", Duration.ofSeconds(rateLimitedSecs)); + + final long pid; + try { + pid = masterServices.splitRegion( + info, null, HConstants.NO_NONCE, HConstants.NO_NONCE); + } catch (IOException e) { + LOG.info("failed to submit plan {}.", plan, e); + planSkipped(plan.getType()); + return; + } + splitPlanCount++; + LOG.info("Submitted {} resulting in pid {}", plan, pid); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index a904e17f7b0f..a641a0aa25b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.assignment.RegionStates; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -54,29 +53,9 @@ *
 *   <li>Otherwise, for the next region in the chain R1, if R0 + R1 is smaller then S, R0 and R1
 *     are kindly requested to merge.</li>
 * </ol>
- * <p>
- * The following parameters are configurable:
- * <ol>
- *   <li>Whether to split a region as part of normalization. Configuration:
- *     {@value #SPLIT_ENABLED_KEY}, default: {@value #DEFAULT_SPLIT_ENABLED}.</li>
- *   <li>Whether to merge a region as part of normalization. Configuration:
- *     {@value #MERGE_ENABLED_KEY}, default: {@value #DEFAULT_MERGE_ENABLED}.</li>
- *   <li>The minimum number of regions in a table to consider it for merge normalization.
- *     Configuration: {@value #MIN_REGION_COUNT_KEY}, default:
- *     {@value #DEFAULT_MIN_REGION_COUNT}.</li>
- *   <li>The minimum age for a region to be considered for a merge, in days. Configuration:
- *     {@value #MERGE_MIN_REGION_AGE_DAYS_KEY}, default:
- *     {@value #DEFAULT_MERGE_MIN_REGION_AGE_DAYS}.</li>
- *   <li>The minimum size for a region to be considered for a merge, in whole MBs. Configuration:
- *     {@value #MERGE_MIN_REGION_SIZE_MB_KEY}, default:
- *     {@value #DEFAULT_MERGE_MIN_REGION_SIZE_MB}.</li>
- * </ol>
- * <p>
    - * To see detailed logging of the application of these configuration values, set the log level for - * this class to `TRACE`. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class SimpleRegionNormalizer implements RegionNormalizer { +class SimpleRegionNormalizer implements RegionNormalizer { private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class); static final String SPLIT_ENABLED_KEY = "hbase.normalizer.split.enabled"; @@ -92,7 +71,6 @@ public class SimpleRegionNormalizer implements RegionNormalizer { static final String MERGE_MIN_REGION_SIZE_MB_KEY = "hbase.normalizer.merge.min_region_size.mb"; static final int DEFAULT_MERGE_MIN_REGION_SIZE_MB = 1; - private final long[] skippedCount; private Configuration conf; private MasterServices masterServices; private boolean splitEnabled; @@ -102,7 +80,6 @@ public class SimpleRegionNormalizer implements RegionNormalizer { private int mergeMinRegionSizeMb; public SimpleRegionNormalizer() { - skippedCount = new long[NormalizationPlan.PlanType.values().length]; splitEnabled = DEFAULT_SPLIT_ENABLED; mergeEnabled = DEFAULT_MERGE_ENABLED; minRegionCount = DEFAULT_MIN_REGION_COUNT; @@ -203,16 +180,6 @@ public void setMasterServices(final MasterServices masterServices) { this.masterServices = masterServices; } - @Override - public void planSkipped(final RegionInfo hri, final PlanType type) { - skippedCount[type.ordinal()]++; - } - - @Override - public long getSkippedCount(NormalizationPlan.PlanType type) { - return skippedCount[type.ordinal()]; - } - @Override public List computePlansForTable(final TableName table) { if (table == null) { @@ -371,7 +338,11 @@ private List computeMergeNormalizationPlans(final NormalizeCo final long nextSizeMb = getRegionSizeMB(next); // always merge away empty regions when they present themselves. 
if (currentSizeMb == 0 || nextSizeMb == 0 || currentSizeMb + nextSizeMb < avgRegionSizeMb) { - plans.add(new MergeNormalizationPlan(current, next)); + final MergeNormalizationPlan plan = new MergeNormalizationPlan.Builder() + .addTarget(current, currentSizeMb) + .addTarget(next, nextSizeMb) + .build(); + plans.add(plan); candidateIdx++; } } @@ -408,11 +379,11 @@ private List computeSplitNormalizationPlans(final NormalizeCo if (skipForSplit(ctx.getRegionStates().getRegionState(hri), hri)) { continue; } - final long regionSize = getRegionSizeMB(hri); - if (regionSize > 2 * avgRegionSize) { + final long regionSizeMb = getRegionSizeMB(hri); + if (regionSizeMb > 2 * avgRegionSize) { LOG.info("Table {}, large region {} has size {}, more than twice avg size {}, splitting", - ctx.getTableName(), hri.getRegionNameAsString(), regionSize, avgRegionSize); - plans.add(new SplitNormalizationPlan(hri)); + ctx.getTableName(), hri.getRegionNameAsString(), regionSizeMb, avgRegionSize); + plans.add(new SplitNormalizationPlan(hri, regionSizeMb)); } } return plans; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java index 7c634fbf2488..ffe68cc9f62d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java @@ -18,32 +18,23 @@ */ package org.apache.hadoop.hbase.master.normalizer; -import java.io.IOException; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; /** - * Normalization plan to split region. + * Normalization plan to split a region. 
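+ * The plan records the split target together with its size in MB, which the worker uses for
+ * rate limiting; for example (sketch): {@code new SplitNormalizationPlan(regionInfo, regionSizeMb)}.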
*/ @InterfaceAudience.Private -public class SplitNormalizationPlan implements NormalizationPlan { +final class SplitNormalizationPlan implements NormalizationPlan { - private final RegionInfo regionInfo; + private final NormalizationTarget splitTarget; - public SplitNormalizationPlan(RegionInfo regionInfo) { - this.regionInfo = regionInfo; - } - - @Override - public long submit(MasterServices masterServices) throws IOException { - return masterServices.splitRegion(regionInfo, null, HConstants.NO_NONCE, - HConstants.NO_NONCE); + SplitNormalizationPlan(final RegionInfo splitTarget, final long splitTargetSizeMb) { + this.splitTarget = new NormalizationTarget(splitTarget, splitTargetSizeMb); } @Override @@ -51,14 +42,14 @@ public PlanType getType() { return PlanType.SPLIT; } - public RegionInfo getRegionInfo() { - return regionInfo; + public NormalizationTarget getSplitTarget() { + return splitTarget; } @Override public String toString() { return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("regionInfo", regionInfo) + .append("splitTarget", splitTarget) .toString(); } @@ -75,13 +66,13 @@ public boolean equals(Object o) { SplitNormalizationPlan that = (SplitNormalizationPlan) o; return new EqualsBuilder() - .append(regionInfo, that.regionInfo) + .append(splitTarget, that.splitTarget) .isEquals(); } @Override public int hashCode() { return new HashCodeBuilder(17, 37) - .append(regionInfo) + .append(splitTarget) .toHashCode(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java new file mode 100644 index 000000000000..e3180347dc34 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * The Region Normalizer subsystem is responsible for coaxing all the regions in a table toward + * a "normal" size, according to their storefile size. It does this by splitting regions that + * are significantly larger than the norm, and merging regions that are significantly smaller than + * the norm. + *
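+ * <p>
+ * As a worked example of the default heuristics (see
+ * {@link org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer}): with an average
+ * region size of 100 MB, a 250 MB region is a split candidate because it is larger than twice
+ * the average, while adjacent regions of 30 MB and 40 MB are a merge candidate because their
+ * combined 70 MB is smaller than the average.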
+ * <p>
+ * The public interface to the Region Normalizer subsystem is limited to the following classes:
+ * <ul>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory} provides an
+ *     entry point for creating an instance of the
+ *     {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager}.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager} encapsulates
+ *     the whole Region Normalizer subsystem. You'll find one of these hanging off of the
+ *     {@link org.apache.hadoop.hbase.master.HMaster}, which uses it to delegate API calls. There
+ *     is usually only a single instance of this class.
+ *   </li>
+ *   <li>
+ *     Various configuration points that share the common prefix of {@code hbase.normalizer}.
+ *     <ul>
+ *       <li>Whether to split a region as part of normalization. Configuration:
+ *         {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#SPLIT_ENABLED_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_SPLIT_ENABLED}.</li>
+ *       <li>Whether to merge a region as part of normalization. Configuration:
+ *         {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_ENABLED_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_ENABLED}.</li>
+ *       <li>The minimum number of regions in a table to consider it for merge normalization.
+ *         Configuration: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MIN_REGION_COUNT_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MIN_REGION_COUNT}.</li>
+ *       <li>The minimum age for a region to be considered for a merge, in days. Configuration:
+ *         {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_MIN_REGION_AGE_DAYS_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_MIN_REGION_AGE_DAYS}.</li>
+ *       <li>The minimum size for a region to be considered for a merge, in whole MBs. Configuration:
+ *         {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#MERGE_MIN_REGION_SIZE_MB_KEY},
+ *         default: {@value org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer#DEFAULT_MERGE_MIN_REGION_SIZE_MB}.</li>
+ *       <li>The limit on total throughput of the Region Normalizer's actions, in whole MBs. Configuration:
+ *         {@value org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker#RATE_LIMIT_BYTES_PER_SEC_KEY},
+ *         default: unlimited.</li>
+ *     </ul>
+ *     <p>
+ *     To see detailed logging of the application of these configuration values, set the log
+ *     level for this package to `TRACE`.
+ *   </li>
+ * </ul>
+ * <p>
+ * The Region Normalizer subsystem is composed of a handful of related classes:
+ * <ul>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker} provides a system by
+ *     which the Normalizer can be disabled at runtime. It currently does this by managing a znode,
+ *     but this is an implementation detail.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue} is a
+ *     {@link java.util.Set}-like {@link java.util.Queue} that permits a single copy of a given
+ *     work item to exist in the queue at one time. It also provides a facility for a producer to
+ *     add an item to the front of the line. Consumers are blocked waiting for new work.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore} wakes up
+ *     periodically and schedules new normalization work, adding targets to the queue.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker} runs in a
+ *     daemon thread, grabbing work off the queue as it becomes available.
+ *   </li>
+ *   <li>
+ *     The {@link org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer} implements the
+ *     logic for calculating target region sizes and emitting a list of corresponding
+ *     {@link org.apache.hadoop.hbase.master.normalizer.NormalizationPlan} objects.
+ *   </li>
+ * </ul>
    + */ +package org.apache.hadoop.hbase.master.normalizer; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 7c65005de55d..3f3e80960bb9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.master; import static org.mockito.Mockito.mock; - import java.io.IOException; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; import org.apache.hadoop.hbase.master.locking.LockManager; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; +import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; @@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; - import org.apache.hbase.thirdparty.com.google.protobuf.Service; public class MockNoopMasterServices implements MasterServices { @@ -109,11 +108,6 @@ public ChoreService getChoreService() { return null; } - @Override - public RegionNormalizer getRegionNormalizer() { - return null; - } - @Override public CatalogJanitor getCatalogJanitor() { return null; @@ -139,6 +133,10 @@ public MasterQuotaManager getMasterQuotaManager() { return null; } + @Override public RegionNormalizerManager getRegionNormalizerManager() { + return null; + } + @Override public ProcedureExecutor getMasterProcedureExecutor() { return null; @@ -341,6 +339,10 @@ public boolean isInMaintenanceMode() { return false; } + @Override public boolean skipRegionManagementAction(String action) { + return false; + } + @Override public long getLastMajorCompactionTimestamp(TableName table) throws IOException { return 0; @@ -507,4 +509,9 @@ public RSGroupInfoManager getRSGroupInfoManager() { public boolean isBalancerOn() { return false; } + + @Override + public boolean normalizeRegions(NormalizeTableFilterParams ntfp, boolean isHighPriority) { + return false; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java index 5aec49bdb11c..87a7e680ff8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.master; import java.lang.reflect.Field; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.ScheduledChore; @@ -30,7 +29,6 @@ import org.apache.hadoop.hbase.master.cleaner.LogCleaner; import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.AfterClass; @@ -66,7 +64,7 @@ public static void tearDown() throws Exception { } @Test - public void testDefaultScheduledChores() throws Exception { + public void testDefaultScheduledChores() { // test if logCleaner chore is scheduled by default in HMaster init TestChoreField logCleanerTestChoreField = new TestChoreField<>(); LogCleaner logCleaner = logCleanerTestChoreField.getChoreObj("logCleaner"); @@ -96,10 +94,10 @@ public void testDefaultScheduledChores() throws Exception { balancerChoreTestChoreField.testIfChoreScheduled(balancerChore); // test if normalizerChore chore is scheduled by default in HMaster init - TestChoreField regionNormalizerChoreTestChoreField = + ScheduledChore regionNormalizerChore = hMaster.getRegionNormalizerManager() + .getRegionNormalizerChore(); + TestChoreField regionNormalizerChoreTestChoreField = new TestChoreField<>(); - RegionNormalizerChore regionNormalizerChore = regionNormalizerChoreTestChoreField - .getChoreObj("normalizerChore"); regionNormalizerChoreTestChoreField.testIfChoreScheduled(regionNormalizerChore); // test if catalogJanitorChore chore is scheduled by default in HMaster init @@ -114,22 +112,27 @@ public void testDefaultScheduledChores() throws Exception { hbckChoreTestChoreField.testIfChoreScheduled(hbckChore); } - + /** + * Reflect into the {@link HMaster} instance and find by field name a specified instance + * of {@link ScheduledChore}. 
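+   * <p>
+   * Typical usage, matching the assertions above (the string must name a declared field of
+   * {@link HMaster}):
+   * <pre>{@code
+   * TestChoreField<LogCleaner> choreField = new TestChoreField<>();
+   * LogCleaner logCleaner = choreField.getChoreObj("logCleaner");
+   * choreField.testIfChoreScheduled(logCleaner);
+   * }</pre>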
+ */ private static class TestChoreField { - private E getChoreObj(String fieldName) throws NoSuchFieldException, - IllegalAccessException { - Field masterField = HMaster.class.getDeclaredField(fieldName); - masterField.setAccessible(true); - E choreFieldVal = (E) masterField.get(hMaster); - return choreFieldVal; + @SuppressWarnings("unchecked") + private E getChoreObj(String fieldName) { + try { + Field masterField = HMaster.class.getDeclaredField(fieldName); + masterField.setAccessible(true); + return (E) masterField.get(hMaster); + } catch (Exception e) { + throw new AssertionError( + "Unable to retrieve field '" + fieldName + "' from HMaster instance.", e); + } } private void testIfChoreScheduled(E choreObj) { Assert.assertNotNull(choreObj); Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(choreObj)); } - } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java index ff88be1ef20d..6ac68b300483 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java @@ -72,8 +72,10 @@ public static void teardown() throws Exception { public void testInfo() { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master); - assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount(), 0); - assertEquals(master.getMergePlanCount(), info.getMergePlanCount(), 0); + assertEquals( + master.getRegionNormalizerManager().getSplitPlanCount(), info.getSplitPlanCount(), 0); + assertEquals( + master.getRegionNormalizerManager().getMergePlanCount(), info.getMergePlanCount(), 0); assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0); assertEquals(master.getClusterId(), info.getClusterId()); assertEquals(master.getMasterActiveTime(), info.getActiveTime()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java new file mode 100644 index 000000000000..7e6c74910edf --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java @@ -0,0 +1,234 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.normalizer; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +/** + * Tests that {@link RegionNormalizerWorkQueue} implements the contract described in its docstring. + */ +@Category({ MasterTests.class, SmallTests.class}) +public class TestRegionNormalizerWorkQueue { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionNormalizerWorkQueue.class); + + @Rule + public TestName testName = new TestName(); + + @Test + public void testElementUniquenessAndFIFO() throws Exception { + final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); + final List content = new LinkedList<>(); + IntStream.of(4, 3, 2, 1, 4, 3, 2, 1) + .boxed() + .forEach(queue::put); + assertEquals(4, queue.size()); + while (queue.size() > 0) { + content.add(queue.take()); + } + assertThat(content, contains(4, 3, 2, 1)); + + queue.clear(); + queue.putAll(Arrays.asList(4, 3, 2, 1)); + queue.putAll(Arrays.asList(4, 5)); + assertEquals(5, queue.size()); + content.clear(); + while (queue.size() > 0) { + content.add(queue.take()); + } + assertThat(content, contains(4, 3, 2, 1, 5)); + } + + @Test + public void testPriorityAndFIFO() throws Exception { + final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); + final List content = new LinkedList<>(); + queue.putAll(Arrays.asList(4, 3, 2, 1)); + assertEquals(4, queue.size()); + queue.putFirst(0); + assertEquals(5, queue.size()); + drainTo(queue, content); + assertThat("putFirst items should jump the queue, preserving existing order", + content, contains(0, 4, 3, 2, 1)); + + queue.clear(); + content.clear(); + queue.putAll(Arrays.asList(4, 3, 2, 1)); + queue.putFirst(1); + assertEquals(4, queue.size()); + drainTo(queue, content); + assertThat("existing items re-added with putFirst should jump the queue", + content, contains(1, 4, 3, 2)); + + queue.clear(); + content.clear(); + queue.putAll(Arrays.asList(4, 3, 2, 1)); + queue.putAllFirst(Arrays.asList(2, 3)); + assertEquals(4, queue.size()); + drainTo(queue, content); + assertThat( + "existing items re-added with putAllFirst jump the queue AND honor changes in priority", + content, contains(2, 3, 4, 1)); + } + + private enum Action { + PUT, + PUT_FIRST, + PUT_ALL, + PUT_ALL_FIRST, + } + + /** + * Test that the uniqueness constraint is honored in the face of 
concurrent modification. + */ + @Test + public void testConcurrentPut() throws Exception { + final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); + final int maxValue = 100; + final Runnable producer = () -> { + final Random rand = ThreadLocalRandom.current(); + for (int i = 0; i < 1_000; i++) { + final Action action = Action.values()[rand.nextInt(Action.values().length)]; + switch (action) { + case PUT: { + final int val = rand.nextInt(maxValue); + queue.put(val); + break; + } + case PUT_FIRST: { + final int val = rand.nextInt(maxValue); + queue.putFirst(val); + break; + } + case PUT_ALL: { + final List vals = rand.ints(5, 0, maxValue) + .boxed() + .collect(Collectors.toList()); + queue.putAll(vals); + break; + } + case PUT_ALL_FIRST: { + final List vals = rand.ints(5, 0, maxValue) + .boxed() + .collect(Collectors.toList()); + queue.putAllFirst(vals); + break; + } + default: + fail("Unrecognized action " + action); + } + } + }; + + final int numThreads = 5; + final CompletableFuture[] futures = IntStream.range(0, numThreads) + .mapToObj(val -> CompletableFuture.runAsync(producer)) + .toArray(CompletableFuture[]::new); + CompletableFuture.allOf(futures).join(); + + final List content = new ArrayList<>(queue.size()); + drainTo(queue, content); + assertThat("at most `maxValue` items should be present.", + content.size(), lessThanOrEqualTo(maxValue)); + assertEquals("all items should be unique.", content.size(), new HashSet<>(content).size()); + } + + /** + * Test that calls to {@link RegionNormalizerWorkQueue#take()} block the requesting thread. The + * producing thread places new entries onto the queue following a known schedule. The consuming + * thread collects a time measurement between calls to {@code take}. Finally, the test makes + * coarse-grained assertions of the consumer's observations based on the producer's schedule. + */ + @Test + public void testTake() throws Exception { + final RegionNormalizerWorkQueue queue = new RegionNormalizerWorkQueue<>(); + final ConcurrentLinkedQueue takeTimes = new ConcurrentLinkedQueue<>(); + final AtomicBoolean finished = new AtomicBoolean(false); + final Runnable consumer = () -> { + try { + while (!finished.get()) { + queue.take(); + takeTimes.add(System.nanoTime()); + } + } catch (InterruptedException e) { + fail("interrupted."); + } + }; + + CompletableFuture worker = CompletableFuture.runAsync(consumer); + final long testStart = System.nanoTime(); + for (int i = 0; i < 5; i++) { + Thread.sleep(10); + queue.put(i); + } + + // set finished = true and pipe one more value in case the thread needs an extra pass through + // the loop. 
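+    // Without this extra put, a consumer blocked in take() would never re-check 'finished',
+    // and the test would hang until the one-second wait on the worker future times out.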
+ finished.set(true); + queue.put(1); + worker.get(1, TimeUnit.SECONDS); + + final Iterator times = takeTimes.iterator(); + assertTrue("should have timing information for at least 2 calls to take.", + takeTimes.size() >= 5); + for (int i = 0; i < 5; i++) { + assertThat( + "Observations collected in takeTimes should increase by roughly 10ms every interval", + times.next(), greaterThan(testStart + TimeUnit.MILLISECONDS.toNanos(i * 10))); + } + } + + private static void drainTo(final RegionNormalizerWorkQueue queue, Collection dest) + throws InterruptedException { + assertThat(queue.size(), greaterThan(0)); + while (queue.size() > 0) { + dest.add(queue.take()); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java new file mode 100644 index 000000000000..e3a29b854060 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.normalizer; + +import static java.util.Collections.singletonList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.when; +import java.time.Duration; +import java.util.Arrays; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.StringDescription; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.mockito.Answers; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * A test over {@link RegionNormalizerWorker}. Being a background thread, the only points of + * interaction we have to this class are its input source ({@link RegionNormalizerWorkQueue} and + * its callbacks invoked against {@link RegionNormalizer} and {@link MasterServices}. The work + * queue is simple enough to use directly; for {@link MasterServices}, use a mock because, as of + * now, the worker only invokes 4 methods. 
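+ * <p>
+ * Stubbing follows the usual Mockito pattern, e.g. (taken from the tests below):
+ * <pre>{@code
+ * when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())).thenReturn(1L);
+ * }</pre>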
+ */ +@Category({ MasterTests.class, SmallTests.class}) +public class TestRegionNormalizerWorker { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionNormalizerWorker.class); + + @Rule + public TestName testName = new TestName(); + @Rule + public TableNameTestRule tableName = new TableNameTestRule(); + + @Rule + public MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private MasterServices masterServices; + @Mock + private RegionNormalizer regionNormalizer; + + private HBaseCommonTestingUtility testingUtility; + private RegionNormalizerWorkQueue queue; + private ExecutorService workerPool; + + private final AtomicReference workerThreadThrowable = new AtomicReference<>(); + + @Before + public void before() throws Exception { + MockitoAnnotations.initMocks(this); + when(masterServices.skipRegionManagementAction(any())).thenReturn(false); + testingUtility = new HBaseCommonTestingUtility(); + queue = new RegionNormalizerWorkQueue<>(); + workerThreadThrowable.set(null); + + final String threadNameFmt = + TestRegionNormalizerWorker.class.getSimpleName() + "-" + testName.getMethodName() + "-%d"; + final ThreadFactory threadFactory = new ThreadFactoryBuilder() + .setNameFormat(threadNameFmt) + .setDaemon(true) + .setUncaughtExceptionHandler((t, e) -> workerThreadThrowable.set(e)) + .build(); + workerPool = Executors.newSingleThreadExecutor(threadFactory); + } + + @After + public void after() throws Exception { + workerPool.shutdownNow(); // shutdownNow to interrupt the worker thread sitting on `take()` + assertTrue("timeout waiting for worker thread to terminate", + workerPool.awaitTermination(30, TimeUnit.SECONDS)); + final Throwable workerThrowable = workerThreadThrowable.get(); + assertThat("worker thread threw unexpected exception", workerThrowable, nullValue()); + } + + @Test + public void testMergeCounter() throws Exception { + final TableName tn = tableName.getTableName(); + final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) + .setNormalizationEnabled(true) + .build(); + when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); + when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())) + .thenReturn(1L); + when(regionNormalizer.computePlansForTable(tn)) + .thenReturn(singletonList(new MergeNormalizationPlan.Builder() + .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 10) + .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 20) + .build())); + + final RegionNormalizerWorker worker = new RegionNormalizerWorker( + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + final long beforeMergePlanCount = worker.getMergePlanCount(); + workerPool.submit(worker); + queue.put(tn); + + assertThatEventually("executing work should see plan count increase", + worker::getMergePlanCount, greaterThan(beforeMergePlanCount)); + } + + @Test + public void testSplitCounter() throws Exception { + final TableName tn = tableName.getTableName(); + final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) + .setNormalizationEnabled(true) + .build(); + when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); + when(masterServices.splitRegion(any(), any(), anyLong(), anyLong())) + .thenReturn(1L); + when(regionNormalizer.computePlansForTable(tn)) + .thenReturn(singletonList( + new SplitNormalizationPlan(RegionInfoBuilder.newBuilder(tn).build(), 10))); + + final RegionNormalizerWorker 
worker = new RegionNormalizerWorker( + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + final long beforeSplitPlanCount = worker.getSplitPlanCount(); + workerPool.submit(worker); + queue.put(tn); + + assertThatEventually("executing work should see plan count increase", + worker::getSplitPlanCount, greaterThan(beforeSplitPlanCount)); + } + + /** + * Assert that a rate limit is honored, at least in a rough way. Maintainers should manually + * inspect the log messages emitted by the worker thread to confirm that expected behavior. + */ + @Test + public void testRateLimit() throws Exception { + final TableName tn = tableName.getTableName(); + final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) + .setNormalizationEnabled(true) + .build(); + final RegionInfo splitRegionInfo = RegionInfoBuilder.newBuilder(tn).build(); + final RegionInfo mergeRegionInfo1 = RegionInfoBuilder.newBuilder(tn).build(); + final RegionInfo mergeRegionInfo2 = RegionInfoBuilder.newBuilder(tn).build(); + when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); + when(masterServices.splitRegion(any(), any(), anyLong(), anyLong())) + .thenReturn(1L); + when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())) + .thenReturn(1L); + when(regionNormalizer.computePlansForTable(tn)) + .thenReturn(Arrays.asList( + new SplitNormalizationPlan(splitRegionInfo, 2), + new MergeNormalizationPlan.Builder() + .addTarget(mergeRegionInfo1, 1) + .addTarget(mergeRegionInfo2, 2) + .build(), + new SplitNormalizationPlan(splitRegionInfo, 1))); + + final Configuration conf = testingUtility.getConfiguration(); + conf.set("hbase.normalizer.throughput.max_bytes_per_sec", "1m"); + final RegionNormalizerWorker worker = new RegionNormalizerWorker( + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + workerPool.submit(worker); + final long startTime = System.nanoTime(); + queue.put(tn); + + assertThatEventually("executing work should see split plan count increase", + worker::getSplitPlanCount, comparesEqualTo(2L)); + assertThatEventually("executing work should see merge plan count increase", + worker::getMergePlanCount, comparesEqualTo(1L)); + + final long endTime = System.nanoTime(); + assertThat("rate limited normalizer should have taken at least 5 seconds", + Duration.ofNanos(endTime - startTime), greaterThanOrEqualTo(Duration.ofSeconds(5))); + } + + /** + * Repeatedly evaluates {@code matcher} against the result of calling {@code actualSupplier} + * until the matcher succeeds or the timeout period of 30 seconds is exhausted. 
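+   * <p>
+   * For example, as used above:
+   * <pre>{@code
+   * assertThatEventually("executing work should see plan count increase",
+   *     worker::getMergePlanCount, greaterThan(beforeMergePlanCount));
+   * }</pre>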
+ */ + private void assertThatEventually( + final String reason, + final Supplier actualSupplier, + final Matcher matcher + ) throws Exception { + testingUtility.waitFor(TimeUnit.SECONDS.toMillis(30), + new Waiter.ExplainingPredicate() { + private T lastValue = null; + + @Override + public String explainFailure() { + final Description description = new StringDescription() + .appendText(reason) + .appendText("\nExpected: ") + .appendDescriptionOf(matcher) + .appendText("\n but: "); + matcher.describeMismatch(lastValue, description); + return description.toString(); + } + + @Override public boolean evaluate() { + lastValue = actualSupplier.get(); + return matcher.matches(lastValue); + } + }); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index 89da907eeb09..f263cbc4fdfd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -175,8 +175,12 @@ public void testMergeOfSmallRegions() { createRegionSizesMap(regionInfos, 15, 5, 5, 15, 16); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(1), regionInfos.get(2)))); + assertThat( + normalizer.computePlansForTable(tableName), + contains(new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(1), 5) + .addTarget(regionInfos.get(2), 5) + .build())); } // Test for situation illustrated in HBASE-14867 @@ -188,9 +192,12 @@ public void testMergeOfSecondSmallestRegions() { createRegionSizesMap(regionInfos, 1, 10000, 10000, 10000, 2700, 2700); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(4), regionInfos.get(5)) - )); + assertThat( + normalizer.computePlansForTable(tableName), + contains(new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(4), 2700) + .addTarget(regionInfos.get(5), 2700) + .build())); } @Test @@ -214,7 +221,7 @@ public void testSplitOfLargeRegion() { setupMocksForNormalizer(regionSizes, regionInfos); assertThat(normalizer.computePlansForTable(tableName), contains( - new SplitNormalizationPlan(regionInfos.get(3)))); + new SplitNormalizationPlan(regionInfos.get(3), 30))); } @Test @@ -229,18 +236,26 @@ public void testSplitWithTargetRegionSize() throws Exception { when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionSize()) .thenReturn(20L); assertThat(normalizer.computePlansForTable(tableName), contains( - new SplitNormalizationPlan(regionInfos.get(2)), - new SplitNormalizationPlan(regionInfos.get(3)), - new SplitNormalizationPlan(regionInfos.get(4)), - new SplitNormalizationPlan(regionInfos.get(5)) + new SplitNormalizationPlan(regionInfos.get(2), 60), + new SplitNormalizationPlan(regionInfos.get(3), 80), + new SplitNormalizationPlan(regionInfos.get(4), 100), + new SplitNormalizationPlan(regionInfos.get(5), 120) )); // test when target region size is 200 when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionSize()) .thenReturn(200L); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)), - new 
MergeNormalizationPlan(regionInfos.get(2), regionInfos.get(3)))); + assertThat( + normalizer.computePlansForTable(tableName), + contains( + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 20) + .addTarget(regionInfos.get(1), 40) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(2), 60) + .addTarget(regionInfos.get(3), 80) + .build())); } @Test @@ -255,14 +270,18 @@ public void testSplitWithTargetRegionCount() throws Exception { when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionCount()) .thenReturn(8); assertThat(normalizer.computePlansForTable(tableName), contains( - new SplitNormalizationPlan(regionInfos.get(2)), - new SplitNormalizationPlan(regionInfos.get(3)))); + new SplitNormalizationPlan(regionInfos.get(2), 60), + new SplitNormalizationPlan(regionInfos.get(3), 80))); // test when target region count is 3 when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionCount()) .thenReturn(3); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)))); + assertThat( + normalizer.computePlansForTable(tableName), + contains(new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 20) + .addTarget(regionInfos.get(1), 40) + .build())); } @Test @@ -312,14 +331,17 @@ public void testHonorsMinimumRegionCount() { List plans = normalizer.computePlansForTable(tableName); assertThat(plans, contains( - new SplitNormalizationPlan(regionInfos.get(2)), - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)))); + new SplitNormalizationPlan(regionInfos.get(2), 10), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 1) + .addTarget(regionInfos.get(1), 1) + .build())); // have to call setupMocks again because we don't have dynamic config update on normalizer. 
conf.setInt(MIN_REGION_COUNT_KEY, 4); setupMocksForNormalizer(regionSizes, regionInfos); assertThat(normalizer.computePlansForTable(tableName), contains( - new SplitNormalizationPlan(regionInfos.get(2)))); + new SplitNormalizationPlan(regionInfos.get(2), 10))); } @Test @@ -356,8 +378,12 @@ public void testHonorsMergeMinRegionSize() { assertFalse(normalizer.isSplitEnabled()); assertEquals(1, normalizer.getMergeMinRegionSizeMb()); - assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)))); + assertThat( + normalizer.computePlansForTable(tableName), + contains(new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 1) + .addTarget(regionInfos.get(1), 2) + .build())); conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 3); setupMocksForNormalizer(regionSizes, regionInfos); @@ -378,9 +404,18 @@ public void testMergeEmptyRegions() { assertFalse(normalizer.isSplitEnabled()); assertEquals(0, normalizer.getMergeMinRegionSizeMb()); assertThat(normalizer.computePlansForTable(tableName), contains( - new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)), - new MergeNormalizationPlan(regionInfos.get(2), regionInfos.get(3)), - new MergeNormalizationPlan(regionInfos.get(5), regionInfos.get(6)))); + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 1) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(2), 10) + .addTarget(regionInfos.get(3), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(5), 10) + .addTarget(regionInfos.get(6), 0) + .build())); } // This test is to make sure that normalizer is only going to merge adjacent regions. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java index 173adf49db26..f5feb59ca329 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; - import java.io.IOException; import java.util.Collections; import java.util.Comparator; @@ -161,6 +160,7 @@ public void testHonorsNormalizerTableSetting() throws Exception { tn2 + " should not have split.", tn2RegionCount, getRegionCount(tn2)); + LOG.debug("waiting for t3 to settle..."); waitForTableRegionCount(tn3, tn3RegionCount); } finally { dropIfExists(tn1); @@ -187,7 +187,7 @@ void testRegionNormalizationSplit(boolean limitedByQuota) throws Exception { : TableName.valueOf(name.getMethodName()); final int currentRegionCount = createTableBegsSplit(tableName, true, false); - final long existingSkippedSplitCount = master.getRegionNormalizer() + final long existingSkippedSplitCount = master.getRegionNormalizerManager() .getSkippedCount(PlanType.SPLIT); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); @@ -332,7 +332,8 @@ private static void waitForSkippedSplits(final HMaster master, return "waiting to observe split attempt and skipped."; } @Override public boolean evaluate() { - final long skippedSplitCount = master.getRegionNormalizer().getSkippedCount(PlanType.SPLIT); + final long 
skippedSplitCount = master.getRegionNormalizerManager() + .getSkippedCount(PlanType.SPLIT); return skippedSplitCount > existingSkippedSplitCount; } }); From 0ba65d28539799ea9400822c4d94435e7cd36d03 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Fri, 9 Oct 2020 08:46:05 -0700 Subject: [PATCH 434/769] HBASE-22976 [HBCK2] Add RecoveredEditsPlayer (#2504) Make it so WALPlayer can replay recovered.edits files. hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java Allow for WAL files that do NOT have a start time in their name. Use the 'generic' WAL-filename parser instead of the one that used to be local here. Implement support for the 'startTime' filter. Previously it was just not implemented. hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java Checkstyle. hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java Use the new general WAL name timestamp parser. hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java Utility for parsing timestamp from WAL filename. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java Export attributes about the local recovered.edits test file so other tests can play with it. Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/util/CommonFSUtils.java | 3 +- .../hbase/mapreduce/WALInputFormat.java | 82 +++++++++++-------- .../hadoop/hbase/mapreduce/WALPlayer.java | 18 ++-- .../hbase/mapreduce/TestWALInputFormat.java | 75 +++++++++++++++++ .../hadoop/hbase/mapreduce/TestWALPlayer.java | 33 ++++++-- .../hbase/mapreduce/TestWALRecordReader.java | 35 ++++---- .../hbase/wal/AbstractFSWALProvider.java | 12 +-- .../java/org/apache/hadoop/hbase/wal/WAL.java | 29 +++++++ .../regionserver/TestRecoveredEdits.java | 63 ++++++++------ src/main/asciidoc/_chapters/ops_mgt.adoc | 36 ++------ 10 files changed, 263 insertions(+), 123 deletions(-) create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index aaa3e82f23e0..2549c6df507e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -364,7 +364,8 @@ private static boolean isValidWALRootDir(Path walDir, final Configuration c) thr if (!qualifiedWalDir.equals(rootDir)) { if (qualifiedWalDir.toString().startsWith(rootDir.toString() + "/")) { throw new IllegalStateException("Illegal WAL directory specified. " + - "WAL directories are not permitted to be under the root directory if set."); + "WAL directories are not permitted to be under root directory: rootDir=" + + rootDir.toString() + ", qualifiedWALDir=" + qualifiedWalDir); } } return true; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 7c4be83a73e9..b410fc22d891 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -22,24 +22,21 @@ import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; +import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; @@ -49,6 +46,9 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL} files. @@ -77,10 +77,6 @@ public WALSplit() {} * Represent an WALSplit, i.e. a single WAL file. * Start- and EndTime are managed by the split, so that WAL files can be * filtered before WALEdits are passed to the mapper(s). - * @param logFileName - * @param fileSize - * @param startTime - * @param endTime */ public WALSplit(String logFileName, long fileSize, long startTime, long endTime) { this.logFileName = logFileName; @@ -186,7 +182,9 @@ private void seek() throws IOException { @Override public boolean nextKeyValue() throws IOException, InterruptedException { - if (reader == null) return false; + if (reader == null) { + return false; + } this.currentPos = reader.getPosition(); Entry temp; long i = -1; @@ -204,7 +202,9 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } while (temp != null && temp.getKey().getWriteTime() < startTime); if (temp == null) { - if (i > 0) LOG.info("Skipped " + i + " entries."); + if (i > 0) { + LOG.info("Skipped " + i + " entries."); + } LOG.info("Reached end of file."); return false; } else if (i > 0) { @@ -242,7 +242,9 @@ public float getProgress() throws IOException, InterruptedException { @Override public void close() throws IOException { LOG.info("Closing reader"); - if (reader != null) this.reader.close(); + if (reader != null) { + this.reader.close(); + } } } @@ -301,40 +303,56 @@ private Path[] getInputPaths(Configuration conf) { inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); } + /** + * @param startTime If file looks like it has a timestamp in its name, we'll check if newer + * or equal to this value else we will filter out the file. If name does not + * seem to have a timestamp, we will just return it w/o filtering. + * @param endTime If file looks like it has a timestamp in its name, we'll check if older or equal + * to this value else we will filter out the file. If name does not seem to + * have a timestamp, we will just return it w/o filtering. 
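+ * For illustration: a file named "wal.1277499063250" is kept only when
+ * startTime <= 1277499063250 <= endTime, while a name with no parseable timestamp,
+ * e.g. the recovered.edits file "0000000000000016310", is always kept regardless of
+ * the range (see addFile below).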
+ */ private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException { List result = new ArrayList<>(); LOG.debug("Scanning " + dir.toString() + " for WAL files"); - RemoteIterator iter = fs.listLocatedStatus(dir); - if (!iter.hasNext()) return Collections.emptyList(); + if (!iter.hasNext()) { + return Collections.emptyList(); + } while (iter.hasNext()) { LocatedFileStatus file = iter.next(); if (file.isDirectory()) { - // recurse into sub directories + // Recurse into sub directories result.addAll(getFiles(fs, file.getPath(), startTime, endTime)); } else { - String name = file.getPath().toString(); - int idx = name.lastIndexOf('.'); - if (idx > 0) { - try { - long fileStartTime = Long.parseLong(name.substring(idx+1)); - if (fileStartTime <= endTime) { - LOG.info("Found: " + file); - result.add(file); - } - } catch (NumberFormatException x) { - idx = 0; - } - } - if (idx == 0) { - LOG.warn("File " + name + " does not appear to be an WAL file. Skipping..."); - } + addFile(result, file, startTime, endTime); } } + // TODO: These results should be sorted? Results could be content of recovered.edits directory + // -- null padded increasing numeric -- or a WAL file w/ timestamp suffix or timestamp and + // then meta suffix. See AbstractFSWALProvider#WALStartTimeComparator return result; } + static void addFile(List result, LocatedFileStatus lfs, long startTime, + long endTime) { + long timestamp = WAL.getTimestamp(lfs.getPath().getName()); + if (timestamp > 0) { + // Looks like a valid timestamp. + if (timestamp <= endTime && timestamp >= startTime) { + LOG.info("Found {}", lfs.getPath()); + result.add(lfs); + } else { + LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), + startTime, Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); + } + } else { + // If no timestamp, add it regardless. + LOG.info("Found (no-timestamp!) {}", lfs); + result.add(lfs); + } + } + @Override public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 5b1aac654414..a47a12fffb5a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; @@ -239,6 +238,7 @@ protected boolean filter(Context context, final Cell cell) { super.cleanup(context); } + @SuppressWarnings("checkstyle:EmptyBlock") @Override public void setup(Context context) throws IOException { String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY); @@ -377,17 +377,21 @@ private void usage(final String errorMsg) { System.err.println(" <WAL inputdir> directory of WALs to replay."); System.err.println(" <tables> comma separated list of tables.
If no tables specified,"); System.err.println(" all are imported (even hbase:meta if present)."); - System.err.println(" <tableMappings> WAL entries can be mapped to a new set of tables by passing"); - System.err.println(" <tableMappings>, a comma separated list of target tables."); - System.err.println(" If specified, each table in <tables> must have a mapping."); + System.err.println(" <tableMappings> WAL entries can be mapped to a new set of tables by " + + "passing"); + System.err.println(" <tableMappings>, a comma separated list of target " + + "tables."); + System.err.println(" If specified, each table in <tables> must have a " + + "mapping."); System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println(" Only one table can be specified, and no mapping allowed!"); System.err.println("To specify a time range, pass:"); System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); - System.err.println(" The start and the end date of timerange. The dates can be expressed"); - System.err.println(" in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format."); + System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); + System.err.println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + + "format."); System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java new file mode 100644 index 000000000000..8d21c394d554 --- /dev/null +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hbase.mapreduce; + +import static org.junit.Assert.assertEquals; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MapReduceTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +@Category({ MapReduceTests.class, SmallTests.class}) +public class TestWALInputFormat { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWALInputFormat.class); + + /** + * Test the primitive start/end time filtering. + */ + @Test + public void testAddFile() { + List lfss = new ArrayList<>(); + LocatedFileStatus lfs = Mockito.mock(LocatedFileStatus.class); + long now = System.currentTimeMillis(); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name." + now)); + WALInputFormat.addFile(lfss, lfs, now, now); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 1, now - 1); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 2, now - 1); + assertEquals(1, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now - 2, now); + assertEquals(2, lfss.size()); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, now); + assertEquals(3, lfss.size()); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(4, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now, now + 2); + assertEquals(5, lfss.size()); + WALInputFormat.addFile(lfss, lfs, now + 1, now + 2); + assertEquals(5, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name")); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(6, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name.123")); + WALInputFormat.addFile(lfss, lfs, Long.MIN_VALUE, Long.MAX_VALUE); + assertEquals(7, lfss.size()); + Mockito.when(lfs.getPath()).thenReturn(new Path("/name." + now + ".meta")); + WALInputFormat.addFile(lfss, lfs, now, now); + assertEquals(8, lfss.size()); + } +} diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 432aff1dd044..d60a3d9b712c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,8 +24,8 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; - import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.PrintStream; import java.util.ArrayList; import org.apache.hadoop.conf.Configuration; @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.WALPlayer.WALKeyValueMapper; +import org.apache.hadoop.hbase.regionserver.TestRecoveredEdits; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; @@ -73,7 +74,6 @@ */ @Category({MapReduceTests.class, LargeTests.class}) public class TestWALPlayer { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALPlayer.class); @@ -91,7 +91,7 @@ public class TestWALPlayer { @BeforeClass public static void beforeClass() throws Exception { - conf= TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.createRootDir(); walRootDir = TEST_UTIL.createWALRootDir(); fs = CommonFSUtils.getRootDirFileSystem(conf); @@ -106,9 +106,32 @@ public static void afterClass() throws Exception { logFs.delete(walRootDir, true); } + /** + * Test that WALPlayer can replay recovered.edits files. + */ + @Test + public void testPlayingRecoveredEdit() throws Exception { + TableName tn = TableName.valueOf(TestRecoveredEdits.RECOVEREDEDITS_TABLENAME); + TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); + // Copy testing recovered.edits file that is over under hbase-server test resources + // up into a dir in our little hdfs cluster here. + String hbaseServerTestResourcesEdits = System.getProperty("test.build.classes") + + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + assertTrue(new File(hbaseServerTestResourcesEdits).exists()); + FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); + // Target dir. + Path targetDir = new Path("edits").makeQualified(dfs.getUri(), dfs.getHomeDirectory()); + assertTrue(dfs.mkdirs(targetDir)); + dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); + assertEquals(0, + ToolRunner.run(new WALPlayer(this.conf), new String [] {targetDir.toString()})); + // I don't know how many edits are in this file for this table... so just check more than 1. + assertTrue(TEST_UTIL.countRows(tn) > 0); + } + /** * Simple end-to-end test - * @throws Exception */ @Test public void testWALPlayer() throws Exception { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 21a43bd6b47f..40e7f37147a7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - import java.util.List; import java.util.NavigableMap; import java.util.TreeMap; @@ -42,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -123,8 +123,7 @@ public static void tearDownAfterClass() throws Exception { } /** - * Test partial reads from the log based on passed time range - * @throws Exception + * Test partial reads from the WALs based on passed time range. */ @Test public void testPartialRead() throws Exception { @@ -140,6 +139,7 @@ public void testPartialRead() throws Exception { edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts+1, value)); log.appendData(info, getWalKeyImpl(ts+1, scopes), edit); log.sync(); + Threads.sleep(10); LOG.info("Before 1st WAL roll " + log.toString()); log.rollWriter(); LOG.info("Past 1st WAL roll " + log.toString()); @@ -164,26 +164,29 @@ public void testPartialRead() throws Exception { jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString()); jobConf.setLong(WALInputFormat.END_TIME_KEY, ts); - // only 1st file is considered, and only its 1st entry is used + // Only 1st file is considered, and only its 1st entry is in-range. List splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); - jobConf.setLong(WALInputFormat.START_TIME_KEY, ts+1); jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1+1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - // both files need to be considered assertEquals(2, splits.size()); - // only the 2nd entry from the 1st file is used - testSplit(splits.get(0), Bytes.toBytes("2")); - // only the 1nd entry from the 2nd file is used + // Both entries from first file are in-range. + testSplit(splits.get(0), Bytes.toBytes("1"), Bytes.toBytes("2")); + // Only the 1st entry from the 2nd file is in-range. testSplit(splits.get(1), Bytes.toBytes("3")); + + jobConf.setLong(WALInputFormat.START_TIME_KEY, ts + 1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1 + 1); + splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); + assertEquals(1, splits.size()); + // Only the 1st entry from the 2nd file is in-range. 
+ testSplit(splits.get(0), Bytes.toBytes("3")); } /** * Test basic functionality - * @throws Exception */ @Test public void testWALRecordReader() throws Exception { @@ -234,11 +237,7 @@ public void testWALRecordReader() throws Exception { jobConf.setLong(WALInputFormat.END_TIME_KEY, Long.MAX_VALUE); jobConf.setLong(WALInputFormat.START_TIME_KEY, thirdTs); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); - // both logs need to be considered - assertEquals(2, splits.size()); - // but both readers skip all edits - testSplit(splits.get(0)); - testSplit(splits.get(1)); + assertTrue(splits.isEmpty()); } /** @@ -346,4 +345,4 @@ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) } reader.close(); } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 6f9c87b00518..e7bdb0bf6789 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.wal; -import static org.apache.commons.lang3.StringUtils.isNumeric; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; @@ -439,17 +438,12 @@ public int compare(Path o1, Path o2) { * @return start time */ private static long getTS(Path p) { - String name = p.getName(); - String [] splits = name.split("\\."); - String ts = splits[splits.length - 1]; - if (!isNumeric(ts)) { - // Its a '.meta' or a '.syncrep' suffix. - ts = splits[splits.length - 2]; - } - return Long.parseLong(ts); + return WAL.getTimestamp(p.getName()); } } + + public static boolean isArchivedLogFile(Path p) { String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; return p.toString().contains(oldLog); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 747b2770d457..20379fd7fe9e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -32,6 +32,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import static org.apache.commons.lang3.StringUtils.isNumeric; /** * A Write Ahead Log (WAL) provides service for reading, writing waledits. This interface provides @@ -299,4 +300,32 @@ public String toString() { return this.key + "=" + this.edit; } } + + /** + * Split a WAL filename to get a start time. WALs usually have the time we start writing to them + * as part of their name, usually the suffix. Sometimes there will be an extra suffix as when it + * is a WAL for the meta table. For example, WALs might look like this + * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the + * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a + * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have + * no timestamp on it. For example the recovered.edits files are WALs but are named in ascending + * order. Here is an example: 0000000000000016310. Allow for this. + * @param name Name of the WAL file. + * @return Timestamp or -1. 
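+ * For illustration, given the rules above: getTimestamp("10.20.20.171%3A60020.1277499063250")
+ * and getTimestamp("10.20.20.171%3A60020.1277499063250.meta") both return 1277499063250,
+ * while getTimestamp("0000000000000016310") returns -1 because the name has no
+ * dot-separated suffix.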
+ */ + public static long getTimestamp(String name) { + String [] splits = name.split("\\."); + if (splits.length <= 1) { + return -1; + } + String timestamp = splits[splits.length - 1]; + if (!isNumeric(timestamp)) { + // It's a '.meta' or a '.syncrep' suffix. + timestamp = splits[splits.length - 2]; + if (!isNumeric(timestamp)) { + return -1; + } + } + return Long.parseLong(timestamp); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java index ba1d17dd4c6b..c287e02dc97d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -78,6 +78,32 @@ public class TestRecoveredEdits { @Rule public TestName testName = new TestName(); + /** + * Path to a recovered.edits file in the hbase-server test resources folder. + * Getting this path to a file of 10M of edits is a little fragile. + */ + @SuppressWarnings("checkstyle:VisibilityModifier") + public static final Path RECOVEREDEDITS_PATH = new Path( + System.getProperty("test.build.classes", "target/test-classes"), + "0000000000000016310"); + + /** + * Name of the table referenced by edits in the recovered.edits file. + */ + public static final String RECOVEREDEDITS_TABLENAME = "IntegrationTestBigLinkedList"; + + /** + * Column family referenced by edits in the recovered.edits file. + */ + public static final byte [] RECOVEREDEDITS_COLUMNFAMILY = Bytes.toBytes("meta"); + public static final byte[][] RECOVEREDITS_COLUMNFAMILY_ARRAY = + new byte[][] {RECOVEREDEDITS_COLUMNFAMILY}; + public static final ColumnFamilyDescriptor RECOVEREDEDITS_CFD = + ColumnFamilyDescriptorBuilder.newBuilder(RECOVEREDEDITS_COLUMNFAMILY).build(); + + /** + * Name of the table mentioned in the edits from recovered.edits. + */ @BeforeClass public static void setUpBeforeClass() throws Exception { blockCache = BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration()); @@ -102,13 +128,9 @@ private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy po // Set it so we flush every 1M or so. That's a lot. conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(policy).toLowerCase()); - // The file of recovered edits has a column family of 'meta'.
- final String columnFamily = "meta"; - byte[][] columnFamilyAsByteArray = new byte[][] { Bytes.toBytes(columnFamily) }; - TableDescriptor tableDescriptor = TableDescriptorBuilder - .newBuilder(TableName.valueOf(testName.getMethodName())).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(columnFamily)).build()) - .build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder. + newBuilder(TableName.valueOf(testName.getMethodName())). + setColumnFamily(RECOVEREDEDITS_CFD) .build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); final String encodedRegionName = hri.getEncodedName(); Path hbaseRootDir = TEST_UTIL.getDataTestDir(); @@ -123,24 +145,20 @@ private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy po HRegion region = HBaseTestingUtility .createRegionAndWAL(hri, hbaseRootDir, conf, tableDescriptor, blockCache); assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName()); - List storeFiles = region.getStoreFileList(columnFamilyAsByteArray); + List storeFiles = region.getStoreFileList(RECOVEREDITS_COLUMNFAMILY_ARRAY); // There should be no store files. assertTrue(storeFiles.isEmpty()); region.close(); Path regionDir = FSUtils.getRegionDirFromRootDir(hbaseRootDir, hri); Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regionDir); - // This is a little fragile getting this path to a file of 10M of edits. - Path recoveredEditsFile = new Path( - System.getProperty("test.build.classes", "target/test-classes"), - "0000000000000016310"); // Copy this file under the region's recovered.edits dir so it is replayed on reopen. - Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName()); - fs.copyToLocalFile(recoveredEditsFile, destination); + Path destination = new Path(recoveredEditsDir, RECOVEREDEDITS_PATH.getName()); + fs.copyToLocalFile(RECOVEREDEDITS_PATH, destination); assertTrue(fs.exists(destination)); // Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay. region = HRegion.openHRegion(region, null); assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName()); - storeFiles = region.getStoreFileList(columnFamilyAsByteArray); + storeFiles = region.getStoreFileList(RECOVEREDITS_COLUMNFAMILY_ARRAY); // Our 0000000000000016310 is 10MB. Most of the edits are for one region. Lets assume that if // we flush at 1MB, that there are at least 3 flushed files that are there because of the // replay of edits. @@ -150,19 +168,16 @@ private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy po assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10); } // Now verify all edits made it into the region. - int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region); + int count = verifyAllEditsMadeItIn(fs, conf, RECOVEREDEDITS_PATH, region); + assertTrue(count > 0); LOG.info("Checked " + count + " edits made it in"); } /** - * @param fs - * @param conf - * @param edits - * @param region * @return Return how many edits seen. - * @throws IOException */ - private int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf, + // Used by TestWALPlayer over in hbase-mapreduce too. 
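+ // A hypothetical cross-module call might look like this (a sketch only, using the
+ // constants declared above in this class):
+ //   int seen = TestRecoveredEdits.verifyAllEditsMadeItIn(fs, conf,
+ //       TestRecoveredEdits.RECOVEREDEDITS_PATH, region);
+ //   assertTrue(seen > 0);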
+ public static int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf, final Path edits, final HRegion region) throws IOException { int count = 0; // Read all cells from recover edits diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index 6ea23655d3c7..d27c5d6e488f 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -424,32 +424,11 @@ See <>. For bulk replaying WAL files or _recovered.edits_ files, see <>. For reading/verifying individual files, read on. -[[hlog_tool]] -==== FSHLog tool - -The main method on `FSHLog` offers manual split and dump facilities. -Pass it WALs or the product of a split, the content of the _recovered.edits_. -directory. - -You can get a textual dump of a WAL file content by doing the following: - ----- - $ ./bin/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --dump hdfs://example.org:8020/hbase/WALs/example.org,60020,1283516293161/10.10.21.10%3A60020.1283973724012 ----- - -The return code will be non-zero if there are any issues with the file so you can test wholesomeness of file by redirecting `STDOUT` to `/dev/null` and testing the program return. - -Similarly you can force a split of a log file directory by doing: - ----- - $ ./bin/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --split hdfs://example.org:8020/hbase/WALs/example.org,60020,1283516293161/ ----- - [[hlog_tool.prettyprint]] -===== WALPrettyPrinter +==== WALPrettyPrinter -The `WALPrettyPrinter` is a tool with configurable options to print the contents of a WAL. -You can invoke it via the HBase cli with the 'wal' command. +The `WALPrettyPrinter` is a tool with configurable options to print the contents of a WAL +or a _recovered.edits_ file. You can invoke it via the HBase cli with the 'wal' command. ---- $ ./bin/hbase wal hdfs://example.org:8020/hbase/WALs/example.org,60020,1283516293161/10.10.21.10%3A60020.1283973724012 @@ -904,7 +883,10 @@ The output can optionally be mapped to another set of tables. WALPlayer can also generate HFiles for later bulk importing, in that case only a single table and no mapping can be specified. -.WALPrettyPrinter/FSHLog Tool +Finally, you can use WALPlayer to replay the content of a Regions `recovered.edits` directory (the files under +`recovered.edits` directory have the same format as WAL files). + +.WALPrettyPrinter [NOTE] ==== To read or verify single WAL files or _recovered.edits_ files, since they share the WAL format, @@ -945,8 +927,8 @@ To generate HFiles to bulk load instead of loading HBase directly, pass: To specify a time range, pass: -Dwal.start.time=[date|ms] -Dwal.end.time=[date|ms] - The start and the end date of timerange. The dates can be expressed - in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format. + The start and the end date of timerange (inclusive). The dates can be + expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS format. E.g. 
1234567890120 or 2009-02-13T23:32:30.12 Other options: -Dmapreduce.job.name=jobName From 96bc9c67919446b150a0a262cf2a8cec93513dfe Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 10 Oct 2020 08:22:12 +0800 Subject: [PATCH 435/769] HBASE-25163 Increase the timeout value for nightly jobs (#2512) Signed-off-by: stack Signed-off-by: Jan Hentschel Signed-off-by: Viraj Jasani --- dev-support/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index c250dcefe604..01d50197fbc5 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -25,7 +25,7 @@ pipeline { } options { buildDiscarder(logRotator(numToKeepStr: '15')) - timeout (time: 9, unit: 'HOURS') + timeout (time: 16, unit: 'HOURS') timestamps() skipDefaultCheckout() disableConcurrentBuilds() From 3b753b54872b76b52dcdb3ae32154cc54141ddf5 Mon Sep 17 00:00:00 2001 From: XinSun Date: Sat, 10 Oct 2020 16:09:12 +0800 Subject: [PATCH 436/769] HBASE-25171 Remove ZNodePaths.namespaceZNode (#2526) Signed-off-by: Duo Zhang --- .../java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index a0065a9e9cbf..71936b9f36d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -74,8 +74,6 @@ public class ZNodePaths { public final String regionNormalizerZNode; // znode containing the state of all switches, currently there are split and merge child node. public final String switchZNode; - // znode containing namespace descriptors - public final String namespaceZNode; // znode of indicating master maintenance mode public final String masterMaintZNode; @@ -106,7 +104,6 @@ public ZNodePaths(Configuration conf) { regionNormalizerZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); switchZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); - namespaceZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.namespace", "namespace")); masterMaintZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); replicationZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication")); @@ -134,7 +131,6 @@ public String toString() { .append(", balancerZNode=").append(balancerZNode) .append(", regionNormalizerZNode=").append(regionNormalizerZNode) .append(", switchZNode=").append(switchZNode) - .append(", namespaceZNode=").append(namespaceZNode) .append(", masterMaintZNode=").append(masterMaintZNode) .append(", replicationZNode=").append(replicationZNode) .append(", peersZNode=").append(peersZNode) From a7b5e5ba23a4ecfd82707d62f3ad31fdcec0a45e Mon Sep 17 00:00:00 2001 From: Qi Yu Date: Sat, 10 Oct 2020 16:09:39 +0800 Subject: [PATCH 437/769] HBASE-25162 Make flaky tests run more aggressively (#2525) Change the trigger interval from 12h to 4h Signed-off-by: Duo Zhang --- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index 282b83115883..959ae31a0767 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ 
-21,7 +21,7 @@ pipeline { } } triggers { - cron('H */12 * * *') // Every four hours. See https://jenkins.io/doc/book/pipeline/syntax/#cron-syntax + cron('H H/4 * * *') // Every four hours. See https://jenkins.io/doc/book/pipeline/syntax/#cron-syntax } options { // this should roughly match how long we tell the flaky dashboard to look at From 509d061781b7c27d173d8860b027b60aa37d0038 Mon Sep 17 00:00:00 2001 From: ramkrish86 Date: Sun, 11 Oct 2020 10:46:06 +0530 Subject: [PATCH 438/769] HBASE-25065 WAL archival to be done by a separate thread (#2501) * HBASE-25065 WAL archival can be batched/throttled and also done by a separate thread * Fix checkstyle issues * Address review comments * checkstyle comments * Addressing final review comments Signed-off-by: Michael Stack --- .../hbase/master/region/MasterRegion.java | 2 +- .../hbase/regionserver/HRegionServer.java | 2 +- .../hbase/regionserver/wal/AbstractFSWAL.java | 69 +++++++++++++++++-- .../hbase/regionserver/wal/AsyncFSWAL.java | 15 +++- .../hadoop/hbase/regionserver/wal/FSHLog.java | 30 ++++++-- .../hbase/wal/AbstractFSWALProvider.java | 8 ++- .../hadoop/hbase/wal/AsyncFSWALProvider.java | 8 +-- .../hadoop/hbase/wal/DisabledWALProvider.java | 5 +- .../hadoop/hbase/wal/FSHLogProvider.java | 6 +- .../hbase/wal/RegionGroupingProvider.java | 9 ++- .../hbase/wal/SyncReplicationWALProvider.java | 7 +- .../apache/hadoop/hbase/wal/WALFactory.java | 19 +++-- .../apache/hadoop/hbase/wal/WALProvider.java | 5 +- .../regionserver/TestFailedAppendAndSync.java | 44 +++++++++++- .../wal/AbstractTestLogRolling.java | 7 +- .../hadoop/hbase/wal/IOTestProvider.java | 4 +- .../hadoop/hbase/wal/TestWALFactory.java | 2 +- 17 files changed, 199 insertions(+), 43 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java index 81da59d6b665..688a5497ddc0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java @@ -301,7 +301,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException params.archivedWalSuffix(), params.rollPeriodMs(), params.flushSize()); walRoller.start(); - WALFactory walFactory = new WALFactory(conf, server.getServerName().toString(), false); + WALFactory walFactory = new WALFactory(conf, server.getServerName().toString(), server, false); Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName()); HRegion region; if (fs.exists(tableDir)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index d6eb45fe65e1..d51eab4ec400 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1906,7 +1906,7 @@ private void setupWALAndReplication() throws IOException { boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster && !LoadBalancer.isMasterCanHostUserRegions(conf); WALFactory factory = - new WALFactory(conf, serverName.toString(), !isMasterNoTableOrSystemTableOnly); + new WALFactory(conf, serverName.toString(), this, !isMasterNoTableOrSystemTableOnly); if (!isMasterNoTableOrSystemTableOnly) { // TODO Replication make assumptions here based on the default filesystem impl Path oldLogDir = new 
Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index d2c624ab446c..ac99ea671e5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -41,6 +41,8 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -53,6 +55,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -84,8 +87,12 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + + + + /** * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS. Only one @@ -185,6 +192,8 @@ public abstract class AbstractFSWAL implements WAL { */ protected final Configuration conf; + protected final Abortable abortable; + /** Listeners that are called on WAL events. 
*/ protected final List listeners = new CopyOnWriteArrayList<>(); @@ -329,6 +338,11 @@ public WalProps(Map encodedName2HighestSequenceId, long logSize) { protected final AtomicBoolean rollRequested = new AtomicBoolean(false); + private final ExecutorService logArchiveExecutor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("WAL-Archiver-%d").build()); + + private final int archiveRetries; + public long getFilenum() { return this.filenum.get(); } @@ -380,10 +394,19 @@ protected AbstractFSWAL(final FileSystem fs, final Path rootDir, final String lo final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, final String suffix) throws FailedLogCloseException, IOException { + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + } + + protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Path rootDir, + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) + throws FailedLogCloseException, IOException { this.fs = fs; this.walDir = new Path(rootDir, logDir); this.walArchiveDir = new Path(rootDir, archiveDir); this.conf = conf; + this.abortable = abortable; if (!fs.exists(walDir) && !fs.mkdirs(walDir)) { throw new IOException("Unable to mkdir " + walDir); @@ -482,6 +505,8 @@ protected SyncFuture initialValue() { this.walTooOldNs = TimeUnit.SECONDS.toNanos(conf.getInt( SURVIVED_TOO_LONG_SEC_KEY, SURVIVED_TOO_LONG_SEC_DEFAULT)); this.useHsync = conf.getBoolean(HRegion.WAL_HSYNC_CONF_KEY, HRegion.DEFAULT_WAL_HSYNC); + archiveRetries = this.conf.getInt("hbase.regionserver.walroll.archive.retries", 0); + } /** @@ -715,11 +740,39 @@ private void cleanOldLogs() throws IOException { regionsBlockingThisWal.clear(); } } + if (logsToArchive != null) { - for (Pair logAndSize : logsToArchive) { - this.totalLogSize.addAndGet(-logAndSize.getSecond()); - archiveLogFile(logAndSize.getFirst()); - this.walFile2Props.remove(logAndSize.getFirst()); + final List> localLogsToArchive = logsToArchive; + // make it async + for (Pair log : localLogsToArchive) { + logArchiveExecutor.execute(() -> { + archive(log); + }); + this.walFile2Props.remove(log.getFirst()); + } + } + } + + protected void archive(final Pair log) { + int retry = 1; + while (true) { + try { + archiveLogFile(log.getFirst()); + totalLogSize.addAndGet(-log.getSecond()); + // successful + break; + } catch (Throwable e) { + if (retry > archiveRetries) { + LOG.error("Failed log archiving for the log {},", log.getFirst(), e); + if (this.abortable != null) { + this.abortable.abort("Failed log archiving", e); + break; + } + } else { + LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, + e); + } + retry++; } } } @@ -732,7 +785,8 @@ public static Path getWALArchivePath(Path archiveDir, Path p) { return new Path(archiveDir, p.getName()); } - private void archiveLogFile(final Path p) throws IOException { + @VisibleForTesting + protected void archiveLogFile(final Path p) throws IOException { Path newPath = getWALArchivePath(this.walArchiveDir, p); // Tell our listeners that a log is going to be archived. 
if (!this.listeners.isEmpty()) { @@ -907,6 +961,9 @@ public void shutdown() throws IOException { rollWriterLock.lock(); try { doShutdown(); + if (logArchiveExecutor != null) { + logArchiveExecutor.shutdownNow(); + } } finally { rollWriterLock.unlock(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index a40e50335d99..342446098be8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -44,9 +44,11 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput; @@ -60,7 +62,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.channel.Channel; @@ -68,6 +69,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor; + /** * An asynchronous implementation of FSWAL. *
    @@ -206,7 +208,16 @@ public AsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, Configuration conf, List listeners, boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, Class channelClass) throws FailedLogCloseException, IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, + eventLoopGroup, channelClass); + } + + public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDir, + String archiveDir, Configuration conf, List listeners, + boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, + Class channelClass) throws FailedLogCloseException, IOException { + super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, + suffix); this.eventLoopGroup = eventLoopGroup; this.channelClass = channelClass; Supplier hasConsumerTask; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 001be00d8a11..fe910aa067b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -40,10 +40,12 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.trace.TraceUtil; @@ -62,10 +64,10 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * The default implementation of FSWAL. */ @@ -168,7 +170,7 @@ public class FSHLog extends AbstractFSWAL { private final int waitOnShutdownInSeconds; private final ExecutorService closeExecutor = Executors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Close-WAL-Writer-%d").build()); + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Close-WAL-Writer-%d").build()); /** * Exception handler to pass the disruptor ringbuffer. 
Same as native implementation only it logs @@ -208,11 +210,25 @@ public FSHLog(final FileSystem fs, final Path root, final String logDir, final C this(fs, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null); } + @VisibleForTesting + public FSHLog(final FileSystem fs, Abortable abortable, final Path root, final String logDir, + final Configuration conf) throws IOException { + this(fs, abortable, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, + null); + } + + public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, + final String archiveDir, final Configuration conf, final List listeners, + final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { + this(fs, null, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + } + /** * Create an edit log at the given dir location. You should never have to load an * existing log. If there is a log at startup, it should have already been processed and deleted * by the time the WAL object is started up. * @param fs filesystem handle + * @param abortable Abortable - the server here * @param rootDir path to where logs and oldlogs * @param logDir dir where wals are stored * @param archiveDir dir where wals are archived @@ -226,10 +242,12 @@ public FSHLog(final FileSystem fs, final Path root, final String logDir, final C * @param suffix will be url encoded. null is treated as empty. non-empty must start with * {@link org.apache.hadoop.hbase.wal.AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} */ - public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, final List listeners, - final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { - super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir, + final String logDir, final String archiveDir, final Configuration conf, + final List listeners, final boolean failIfWALExists, final String prefix, + final String suffix) throws IOException { + super(fs, abortable, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, + suffix); this.minTolerableReplication = conf.getInt(TOLERABLE_LOW_REPLICATION, CommonFSUtils.getDefaultReplication(fs, this.walDir)); this.lowReplicationRollLimit = conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index e7bdb0bf6789..84c94e608168 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.wal; + import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; @@ -29,10 +30,12 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; import java.util.regex.Pattern; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import 
org.apache.hadoop.hbase.client.RegionInfo; @@ -88,6 +91,7 @@ public interface Reader extends WAL.Reader { protected AtomicBoolean initialized = new AtomicBoolean(false); // for default wal provider, logPrefix won't change protected String logPrefix; + protected Abortable abortable; /** * We use walCreateLock to prevent wal recreation in different threads, and also prevent getWALs @@ -102,7 +106,8 @@ public interface Reader extends WAL.Reader { * null */ @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -119,6 +124,7 @@ public void init(WALFactory factory, Configuration conf, String providerId) thro } } logPrefix = sb.toString(); + this.abortable = abortable; doInit(conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java index 062b3688d3e4..3a2ffa7600bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java @@ -65,11 +65,11 @@ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long @Override protected AsyncFSWAL createWAL() throws IOException { - return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf), - getWALDirectoryName(factory.factoryId), + return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null, - eventLoopGroup, channelClass); + META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : null, eventLoopGroup, + channelClass); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 0ff2195eaa04..6e5a0538296c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -25,8 +25,10 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -55,7 +57,8 @@ class DisabledWALProvider implements WALProvider { WAL disabled; @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (null != disabled) { throw new IllegalStateException("WALProvider.init should only be called once."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index 3b91c2475cfe..e64d70f50981 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -67,7 +67,7 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, * Public because of FSHLog. Should be package-private */ public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path, - final boolean overwritable, long blocksize) throws IOException { + final boolean overwritable, long blocksize) throws IOException { // Configuration already does caching for the Class lookup. Class logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, @@ -101,8 +101,8 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, @Override protected FSHLog createWAL() throws IOException { - return new FSHLog(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf), - getWALDirectoryName(factory.factoryId), + return new FSHLog(CommonFSUtils.getWALFileSystem(conf), abortable, + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 764d3d521ace..20d043b6ae26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -28,7 +28,9 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; import java.util.stream.Collectors; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; @@ -137,14 +139,17 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, private List listeners = new ArrayList<>(); private String providerId; private Class providerClass; + private Abortable abortable; @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (null != strategy) { throw new IllegalStateException("WALProvider.init should only be called once."); } this.conf = conf; this.factory = factory; + this.abortable = abortable; if (META_WAL_PROVIDER_ID.equals(providerId)) { // do not change the provider id if it is for meta @@ -171,7 +176,7 @@ public void init(WALFactory factory, Configuration conf, String providerId) thro private WALProvider createProvider(String group) throws IOException { WALProvider provider = WALFactory.createProvider(providerClass); provider.init(factory, conf, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group); + META_WAL_PROVIDER_ID.equals(providerId) ? 
META_WAL_PROVIDER_ID : group, this.abortable); provider.addWALActionsListener(new MetricsWAL()); return provider; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java index 9859c204649f..001e1a8ea955 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java @@ -35,7 +35,9 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL; @@ -108,11 +110,12 @@ public void setPeerInfoProvider(SyncReplicationPeerInfoProvider peerInfoProvider } @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } - provider.init(factory, conf, providerId); + provider.init(factory, conf, providerId, abortable); this.conf = conf; this.factory = factory; Pair> eventLoopGroupAndChannelClass = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index 26b87277a13a..6a5feb0f4181 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -21,9 +21,11 @@ import java.io.InterruptedIOException; import java.util.List; import java.util.concurrent.atomic.AtomicReference; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; @@ -35,7 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -86,6 +87,7 @@ static enum Providers { public static final String WAL_ENABLED = "hbase.regionserver.hlog.enabled"; final String factoryId; + final Abortable abortable; private final WALProvider provider; // The meta updates are written to a different wal. If this // regionserver holds meta regions, then this ref will be non-null. @@ -119,6 +121,7 @@ private WALFactory(Configuration conf) { // this instance can't create wals, just reader/writers. provider = null; factoryId = SINGLETON_ID; + this.abortable = null; } @VisibleForTesting @@ -175,7 +178,7 @@ static WALProvider createProvider(Class clazz) throws IOE public WALFactory(Configuration conf, String factoryId) throws IOException { // default enableSyncReplicationWALProvider is true, only disable SyncReplicationWALProvider // for HMaster or HRegionServer which take system table only. 
See HBASE-19999 - this(conf, factoryId, true); + this(conf, factoryId, null, true); } /** @@ -183,11 +186,12 @@ public WALFactory(Configuration conf, String factoryId) throws IOException { * instances. * @param factoryId a unique identifier for this factory. used i.e. by filesystem implementations * to make a directory + * @param abortable the server associated with this WAL file * @param enableSyncReplicationWALProvider whether wrap the wal provider to a * {@link SyncReplicationWALProvider} */ - public WALFactory(Configuration conf, String factoryId, boolean enableSyncReplicationWALProvider) - throws IOException { + public WALFactory(Configuration conf, String factoryId, Abortable abortable, + boolean enableSyncReplicationWALProvider) throws IOException { // until we've moved reader/writer construction down into providers, this initialization must // happen prior to provider initialization, in case they need to instantiate a reader/writer. timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000); @@ -196,20 +200,21 @@ public WALFactory(Configuration conf, String factoryId, boolean enableSyncReplic AbstractFSWALProvider.Reader.class); this.conf = conf; this.factoryId = factoryId; + this.abortable = abortable; // end required early initialization if (conf.getBoolean(WAL_ENABLED, true)) { WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); if (enableSyncReplicationWALProvider) { provider = new SyncReplicationWALProvider(provider); } - provider.init(this, conf, null); + provider.init(this, conf, null, this.abortable); provider.addWALActionsListener(new MetricsWAL()); this.provider = provider; } else { // special handling of existing configuration behavior. LOG.warn("Running with WAL disabled."); provider = new DisabledWALProvider(); - provider.init(this, conf, factoryId); + provider.init(this, conf, factoryId, null); } } @@ -274,7 +279,7 @@ WALProvider getMetaProvider() throws IOException { clz = getProviderClass(META_WAL_PROVIDER, conf.get(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); } provider = createProvider(clz); - provider.init(this, conf, AbstractFSWALProvider.META_WAL_PROVIDER_ID); + provider.init(this, conf, AbstractFSWALProvider.META_WAL_PROVIDER_ID, this.abortable); provider.addWALActionsListener(new MetricsWAL()); if (metaProvider.compareAndSet(null, provider)) { return provider; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java index c3bd14995077..01c1d11ead70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java @@ -23,7 +23,9 @@ import java.util.List; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @@ -46,7 +48,8 @@ public interface WALProvider { * @param conf may not be null * @param providerId differentiate between providers from one factory. may be null */ - void init(WALFactory factory, Configuration conf, String providerId) throws IOException; + void init(WALFactory factory, Configuration conf, String providerId, Abortable server) + throws IOException; /** * @param region the region which we want to get a WAL for it. 
Could be null. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java index a9ce54845e46..fdf96dab87fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -41,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALProvider.Writer; @@ -107,11 +110,13 @@ String getName() { class DodgyFSLog extends FSHLog { volatile boolean throwSyncException = false; volatile boolean throwAppendException = false; + volatile boolean throwArchiveException = false; + final AtomicLong rolls = new AtomicLong(0); - public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) + public DodgyFSLog(FileSystem fs, Server server, Path root, String logDir, Configuration conf) throws IOException { - super(fs, root, logDir, conf); + super(fs, server, root, logDir, conf); } @Override @@ -122,6 +127,18 @@ public Map> rollWriter(boolean force) return regions; } + @Override + protected void archiveLogFile(Path p) throws IOException { + if (throwArchiveException) { + throw new IOException("throw archival exception"); + } + } + + @Override + protected void archive(Pair localLogsToArchive) { + super.archive(localLogsToArchive); + } + @Override protected Writer createWriterInstance(Path path) throws IOException { final Writer w = super.createWriterInstance(path); @@ -176,7 +193,7 @@ public void testLockupAroundBadAssignSync() throws IOException { // the test. 
FileSystem fs = FileSystem.get(CONF); Path rootDir = new Path(dir + getName()); - DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF); + DodgyFSLog dodgyWAL = new DodgyFSLog(fs, (Server)services, rootDir, getName(), CONF); dodgyWAL.init(); LogRoller logRoller = new LogRoller(services); logRoller.addWAL(dodgyWAL); @@ -256,6 +273,27 @@ public void testLockupAroundBadAssignSync() throws IOException { Threads.sleep(1); } } + + try { + dodgyWAL.throwAppendException = false; + dodgyWAL.throwSyncException = false; + dodgyWAL.throwArchiveException = true; + Pair pair = new Pair(); + pair.setFirst(new Path("/a/b/")); + pair.setSecond(100L); + dodgyWAL.archive(pair); + } catch (Throwable ioe) { + } + while (true) { + try { + // one more abort needs to be called + Mockito.verify(services, Mockito.atLeast(2)).abort(Mockito.anyString(), + (Throwable) Mockito.anyObject()); + break; + } catch (WantedButNotInvoked t) { + Threads.sleep(1); + } + } } finally { // To stop logRoller, its server has to say it is stopped. Mockito.when(services.isStopped()).thenReturn(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java index 4c19aa0a8244..6e2059d9f30b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java @@ -175,10 +175,15 @@ public void testLogRollOnNothingWritten() throws Exception { } } - private void assertLogFileSize(WAL log) { + private void assertLogFileSize(WAL log) throws InterruptedException { if (AbstractFSWALProvider.getNumRolledLogFiles(log) > 0) { assertTrue(AbstractFSWALProvider.getLogFileSize(log) > 0); } else { + for (int i = 0; i < 10; i++) { + if (AbstractFSWALProvider.getLogFileSize(log) != 0) { + Thread.sleep(10); + } + } assertEquals(0, AbstractFSWALProvider.getLogFileSize(log)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java index d062c77cb336..ecbd0432be18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; // imports for things that haven't moved from regionserver.wal yet. 
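For illustration, a hedged sketch of what a caller sees once the Abortable plumbing in this series is in place. It leans only on APIs visible in these hunks (the four-argument WALFactory constructor, with the Abortable carried through WALProvider.init into FSHLog/AsyncFSWAL); the class name and factory id below are illustrative, not part of the patch.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.wal.WALFactory;

    public class WALFactoryWithAbortable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // A real region server passes itself; any Abortable will do here.
        Abortable abortable = new Abortable() {
          private volatile boolean aborted;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            System.err.println("Aborting because: " + why);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        };
        // The Abortable rides along through the provider chain
        // (SyncReplicationWALProvider, RegionGroupingProvider, then the
        // concrete provider), so a fatal WAL error such as the failed
        // archival exercised in TestFailedAppendAndSync above can abort
        // the hosting server instead of being swallowed.
        WALFactory factory = new WALFactory(conf, "example-factory", abortable, true);
        factory.close();
      }
    }

Passing null, as the two-argument WALFactory constructor now does, keeps the old behavior.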
@@ -99,7 +100,8 @@ private enum AllowedOperations { * null */ @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) + throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index a899bdcb4538..656932bc117c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -687,7 +687,7 @@ public void testWALProviders() throws IOException { assertEquals(wrappedWALProvider.getClass(), walFactory.getMetaProvider().getClass()); // if providers are not set and do not enable SyncReplicationWALProvider - walFactory = new WALFactory(conf, this.currentServername.toString(), false); + walFactory = new WALFactory(conf, this.currentServername.toString(), null, false); assertEquals(walFactory.getWALProvider().getClass(), walFactory.getMetaProvider().getClass()); } From 52a08fd3482e98f4f1d26b2f654638a94dbbff11 Mon Sep 17 00:00:00 2001 From: bsglz <18031031@qq.com> Date: Sun, 11 Oct 2020 19:12:56 +0800 Subject: [PATCH 439/769] HBASE-24840 Avoid shows closing region task when create table (#2226) --- .../apache/hadoop/hbase/monitoring/TaskMonitor.java | 10 ++++++++-- .../org/apache/hadoop/hbase/regionserver/HRegion.java | 10 ++++++++-- .../apache/hadoop/hbase/util/ModifyRegionUtils.java | 2 +- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java index 1bde91553628..d2edaa8b1d84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java @@ -87,8 +87,12 @@ public static synchronized TaskMonitor get() { } return instance; } - + public synchronized MonitoredTask createStatus(String description) { + return createStatus(description, false); + } + + public synchronized MonitoredTask createStatus(String description, boolean ignore) { MonitoredTask stat = new MonitoredTaskImpl(); stat.setDescription(description); MonitoredTask proxy = (MonitoredTask) Proxy.newProxyInstance( @@ -99,7 +103,9 @@ public synchronized MonitoredTask createStatus(String description) { if (tasks.isFull()) { purgeExpiredTasks(); } - tasks.add(pair); + if (!ignore) { + tasks.add(pair); + } return proxy; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index a208d9330042..57a1e1f5de93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -1578,6 +1578,10 @@ public Map> close() throws IOException { */ public static final long MAX_FLUSH_PER_CHANGES = 1000000000; // 1G + public Map> close(boolean abort) throws IOException { + return close(abort, false); + } + /** * Close down this HRegion. Flush the cache unless abort parameter is true, * Shut down each HStore, don't service any more calls. 
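For illustration, a hedged sketch of the new ignore flag in action. It uses only the createStatus overloads added above plus pre-existing TaskMonitor/MonitoredTask methods; the task descriptions are made up.

    import java.util.List;
    import org.apache.hadoop.hbase.monitoring.MonitoredTask;
    import org.apache.hadoop.hbase.monitoring.TaskMonitor;

    public class IgnoredStatusSketch {
      public static void main(String[] args) {
        TaskMonitor monitor = TaskMonitor.get();
        // Default path: the status is registered and shows on the task list.
        MonitoredTask visible = monitor.createStatus("Closing region r1");
        // New overload: with ignore=true the returned proxy is fully usable,
        // but it is never added to the monitor, so short-lived closes (like
        // the close right after create-table in ModifyRegionUtils below)
        // stay off the UI.
        MonitoredTask hidden = monitor.createStatus("Closing region r2", true);
        hidden.setStatus("Waiting for close lock");
        List<MonitoredTask> tasks = monitor.getTasks();
        // tasks reports "Closing region r1" but not "Closing region r2".
      }
    }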
@@ -1586,6 +1590,7 @@ public Map> close() throws IOException { * time-sensitive thread. * * @param abort true if server is aborting (only during testing) + * @param ignoreStatus true if the status should be ignored (won't be shown on the task list) * @return Vector of all the storage files that the HRegion's component * HStores make use of. It's a list of StoreFile objects. Can be null if * we are not to close at this time or we are already closed. @@ -1595,12 +1600,13 @@ public Map> close() throws IOException { * because a Snapshot was not properly persisted. The region is put in closing mode, and the * caller MUST abort after this. */ - public Map> close(boolean abort) throws IOException { + public Map> close(boolean abort, boolean ignoreStatus) + throws IOException { // Only allow one thread to close at a time. Serialize them so dual // threads attempting to close will run up against each other. MonitoredTask status = TaskMonitor.get().createStatus( "Closing region " + this.getRegionInfo().getEncodedName() + - (abort ? " due to abort" : "")); + (abort ? " due to abort" : ""), ignoreStatus); status.enableStatusJournal(true); status.setStatus("Waiting for close lock"); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index b4e586392cf2..a3a0c7b23a63 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -186,7 +186,7 @@ public static RegionInfo createRegion(final Configuration conf, final Path rootD } } finally { // 3. Close the new region to flush to disk. Close log file too. - region.close(); + region.close(false, true); } return region.getRegionInfo(); } From 2bc768b5beaff79950c42b5e3119374cc5758812 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Sun, 11 Oct 2020 06:52:11 -0700 Subject: [PATCH 440/769] HBASE-25156 TestMasterFailover.testSimpleMasterFailover is flaky (ADDENDUM) (#2529) Signed-off-by: Duo Zhang --- .../java/org/apache/hadoop/hbase/master/TestMasterFailover.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index 1e37fcb52b58..2be53a6d26bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -129,7 +129,7 @@ public void testSimpleMasterFailover() throws Exception { // wait for the active master to acknowledge loss of the backup from ZK final HMaster activeFinal = active; TEST_UTIL.waitFor( - TimeUnit.SECONDS.toMillis(30), () -> activeFinal.getBackupMasters().size() == 1); + TimeUnit.MINUTES.toMillis(5), () -> activeFinal.getBackupMasters().size() == 1); // Check that ClusterStatus reports the correct active and backup masters assertNotNull(active); From df69169aa94763b5ef8c232a2abe84dd677d4845 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Sun, 11 Oct 2020 10:11:32 -0500 Subject: [PATCH 441/769] HBASE-25175 Remove the constructors of HBaseConfiguration (#2530) Co-authored-by: niuyulin Signed-off-by: Jan Hentschel --- .../hadoop/hbase/HBaseConfiguration.java | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 67de5fb3a21b..70467f08aa01 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -36,36 +36,6 @@ public class HBaseConfiguration extends Configuration { private static final Logger LOG = LoggerFactory.getLogger(HBaseConfiguration.class); - /** - * Instantiating HBaseConfiguration() is deprecated. Please use - * HBaseConfiguration#create() to construct a plain Configuration - * @deprecated since 0.90.0. Please use {@link #create()} instead. - * @see #create() - * @see HBASE-2036 - */ - @Deprecated - public HBaseConfiguration() { - //TODO:replace with private constructor, HBaseConfiguration should not extend Configuration - super(); - addHbaseResources(this); - LOG.warn("instantiating HBaseConfiguration() is deprecated. Please use" - + " HBaseConfiguration#create() to construct a plain Configuration"); - } - - /** - * Instantiating HBaseConfiguration() is deprecated. Please use - * HBaseConfiguration#create(conf) to construct a plain Configuration - * @deprecated since 0.90.0. Please use {@link #create(Configuration)} instead. - * @see #create(Configuration) - * @see HBASE-2036 - */ - @Deprecated - public HBaseConfiguration(final Configuration c) { - //TODO:replace with private constructor - this(); - merge(this, c); - } - private static void checkDefaultsVersion(Configuration conf) { if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return; String defaultsVersion = conf.get("hbase.defaults.for.version"); From 2231919690bbfcae06bed311ba7d03adc1d70e98 Mon Sep 17 00:00:00 2001 From: niuyulin Date: Sun, 11 Oct 2020 22:02:37 -0500 Subject: [PATCH 442/769] HBASE-25093 the RSGroupBasedLoadBalancer#retainAssignment throws NPE (#2450) Signed-off-by: Duo Zhang --- .../favored/FavoredNodeLoadBalancer.java | 3 +++ .../hadoop/hbase/master/LoadBalancer.java | 5 ++-- .../master/assignment/AssignmentManager.java | 6 +---- .../master/balancer/BaseLoadBalancer.java | 27 +++++++++++-------- .../balancer/FavoredStochasticBalancer.java | 10 ++++--- .../rsgroup/RSGroupBasedLoadBalancer.java | 23 ++++++++-------- .../apache/hadoop/hbase/TestZooKeeper.java | 3 +++ 7 files changed, 45 insertions(+), 32 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index 8cde76e07c60..60a2c6cae13f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -22,12 +22,14 @@ import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.SECONDARY; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.TERTIARY; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -161,6 +163,7 @@ public List balanceTable(TableName tableName, } @Override + @NonNull public Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { Map> assignmentMap; diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index b7ec1a3aa1bc..90cb3946f8b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.hbase.master; -import edu.umd.cs.findbugs.annotations.Nullable; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.List; import java.util.Map; @@ -110,6 +110,7 @@ List balanceTable(TableName tableName, * Perform a Round Robin assignment of regions. * @return Map of servername to regioninfos */ + @NonNull Map> roundRobinAssignment(List regions, List servers) throws IOException; @@ -117,7 +118,7 @@ Map> roundRobinAssignment(List regions, * Assign regions to the previously hosting region server * @return List of plans */ - @Nullable + @NonNull Map> retainAssignment(Map regions, List servers) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index fb64514a3377..f23d17026f3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -2165,12 +2165,8 @@ private void acceptPlan(final HashMap regions, final ProcedureEvent[] events = new ProcedureEvent[regions.size()]; final long st = System.currentTimeMillis(); - if (plan == null) { - throw new HBaseIOException("unable to compute plans for regions=" + regions.size()); - } - if (plan.isEmpty()) { - return; + throw new HBaseIOException("unable to compute plans for regions=" + regions.size()); } int evcount = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 6a27a6a05680..a47bff26a090 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.master.balancer; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -1132,11 +1133,9 @@ protected List balanceMasterRegions(Map * If master is configured to carry system tables only, in here is * where we figure what to assign it. */ + @NonNull protected Map> assignMasterSystemRegions( Collection regions, List servers) { - if (servers == null || regions == null || regions.isEmpty()) { - return null; - } Map> assignments = new TreeMap<>(); if (this.maintenanceMode || this.onlySystemTablesOnMaster) { if (masterServerName != null && servers.contains(masterServerName)) { @@ -1267,15 +1266,16 @@ protected final boolean idleRegionServerExist(Cluster c){ * * @param regions all regions * @param servers all servers - * @return map of server to the regions it should take, or null if no - * assignment is possible (ie. no regions or no servers) + * @return map of server to the regions it should take, or emptyMap if no + * assignment is possible (ie. 
no servers) */ @Override + @NonNull public Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { metricsBalancer.incrMiscInvocations(); Map> assignments = assignMasterSystemRegions(regions, servers); - if (assignments != null && !assignments.isEmpty()) { + if (!assignments.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -1285,14 +1285,17 @@ public Map> roundRobinAssignment(List r regions.removeAll(masterRegions); } } - if (this.maintenanceMode || regions == null || regions.isEmpty()) { + /** + * only need assign system table + */ + if (this.maintenanceMode || regions.isEmpty()) { return assignments; } int numServers = servers == null ? 0 : servers.size(); if (numServers == 0) { LOG.warn("Wanted to do round robin assignment but no servers to assign to"); - return null; + return Collections.emptyMap(); } // TODO: instead of retainAssignment() and roundRobinAssignment(), we should just run the @@ -1407,15 +1410,17 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve * * @param regions regions and existing assignment from meta * @param servers available servers - * @return map of servers and regions to be assigned to them + * @return map of servers and regions to be assigned to them, or emptyMap if no + * assignment is possible (ie. no servers) */ @Override + @NonNull public Map> retainAssignment(Map regions, List servers) throws HBaseIOException { // Update metrics metricsBalancer.incrMiscInvocations(); Map> assignments = assignMasterSystemRegions(regions.keySet(), servers); - if (assignments != null && !assignments.isEmpty()) { + if (!assignments.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -1430,7 +1435,7 @@ public Map> retainAssignment(Map> roundRobinAssignment(List regions, List servers) throws HBaseIOException { @@ -116,7 +119,7 @@ public Map> roundRobinAssignment(List r Set regionSet = Sets.newHashSet(regions); Map> assignmentMap = assignMasterSystemRegions(regions, servers); - if (assignmentMap != null && !assignmentMap.isEmpty()) { + if (!assignmentMap.isEmpty()) { servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); @@ -367,14 +370,15 @@ private void updateFavoredNodesForRegion(RegionInfo regionInfo, List * Reuse BaseLoadBalancer's retainAssignment, but generate favored nodes when its missing. 
*/ @Override + @NonNull public Map> retainAssignment(Map regions, List servers) throws HBaseIOException { Map> assignmentMap = Maps.newHashMap(); Map> result = super.retainAssignment(regions, servers); - if (result == null || result.isEmpty()) { + if (result.isEmpty()) { LOG.warn("Nothing to assign to, probably no servers or no regions"); - return null; + return result; } // Guarantee not to put other regions on master diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 50ddb416e911..db61c01dec9b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.rsgroup; +import edu.umd.cs.findbugs.annotations.NonNull; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -174,25 +175,25 @@ public List balanceCluster( } @Override - public Map> roundRobinAssignment( - List regions, List servers) throws IOException { + @NonNull + public Map> roundRobinAssignment(List regions, + List servers) throws IOException { Map> assignments = Maps.newHashMap(); List, List>> pairs = generateGroupAssignments(regions, servers); for (Pair, List> pair : pairs) { - Map> result = this.internalBalancer - .roundRobinAssignment(pair.getFirst(), pair.getSecond()); - if (result != null) { - result.forEach((server, regionInfos) -> - assignments.computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); - } + Map> result = + this.internalBalancer.roundRobinAssignment(pair.getFirst(), pair.getSecond()); + result.forEach((server, regionInfos) -> assignments + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(regionInfos)); } return assignments; } @Override + @NonNull public Map> retainAssignment(Map regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { try { Map> assignments = new TreeMap<>(); List, List>> pairs = @@ -203,8 +204,8 @@ public Map> retainAssignment(Map currentAssignmentMap.put(r, regions.get(r))); Map> pairResult = this.internalBalancer.retainAssignment(currentAssignmentMap, pair.getSecond()); - pairResult.forEach((server, rs) -> - assignments.computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); + pairResult.forEach((server, rs) -> assignments + .computeIfAbsent(server, s -> Lists.newArrayList()).addAll(rs)); } return assignments; } catch (IOException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 4bbb3ed9c178..c0eacae0a18d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -21,8 +21,10 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import edu.umd.cs.findbugs.annotations.NonNull; import java.util.List; import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -280,6 +282,7 @@ static class MockLoadBalancer extends SimpleLoadBalancer { static boolean retainAssignCalled = false; @Override + @NonNull public Map> retainAssignment( Map regions, List servers) throws HBaseIOException { 
retainAssignCalled = true; From b9da96f73d6066635fb78c523e0ad1ec3c6208c6 Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 9 Oct 2020 21:00:30 -0700 Subject: [PATCH 443/769] HBASE-25168 Unify WAL name timestamp parsers Signed-off-by: Duo Zhang Signed-off-by: Peter Somogyi --- .../hbase/mapreduce/WALInputFormat.java | 2 +- .../hbase/wal/AbstractFSWALProvider.java | 54 +++++++++++++------ .../java/org/apache/hadoop/hbase/wal/WAL.java | 29 ---------- 3 files changed, 39 insertions(+), 46 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index b410fc22d891..14bfec72efe8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -336,7 +336,7 @@ private List getFiles(FileSystem fs, Path dir, long startTime, long static void addFile(List result, LocatedFileStatus lfs, long startTime, long endTime) { - long timestamp = WAL.getTimestamp(lfs.getPath().getName()); + long timestamp = AbstractFSWALProvider.getTimestamp(lfs.getPath().getName()); if (timestamp > 0) { // Looks like a valid timestamp. if (timestamp <= endTime && timestamp >= startTime) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 84c94e608168..109e1107669e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -258,32 +258,37 @@ static void requestLogRoll(final WAL wal) { public static final String SPLITTING_EXT = "-splitting"; /** - * It returns the file create timestamp from the file name. For name format see + * Pattern used to validate a WAL file name see {@link #validateWALFilename(String)} for + * description. + */ + private static final Pattern WAL_FILE_NAME_PATTERN = + Pattern.compile("(.+)\\.(\\d+)(\\.[0-9A-Za-z]+)?"); + + /** + * Define for when no timestamp found. + */ + private static final long NO_TIMESTAMP = -1L; + + /** + * It returns the file create timestamp (the 'FileNum') from the file name. For name format see * {@link #validateWALFilename(String)} public until remaining tests move to o.a.h.h.wal * @param wal must not be null * @return the file number that is part of the WAL file name */ @VisibleForTesting public static long extractFileNumFromWAL(final WAL wal) { - final Path walName = ((AbstractFSWAL) wal).getCurrentFileName(); - if (walName == null) { + final Path walPath = ((AbstractFSWAL) wal).getCurrentFileName(); + if (walPath == null) { throw new IllegalArgumentException("The WAL path couldn't be null"); } - Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(walName.getName()); - if (matcher.matches()) { - return Long.parseLong(matcher.group(2)); - } else { - throw new IllegalArgumentException(walName.getName() + " is not a valid wal file name"); + String name = walPath.getName(); + long timestamp = getTimestamp(name); + if (timestamp == NO_TIMESTAMP) { + throw new IllegalArgumentException(name + " is not a valid wal file name"); } + return timestamp; } - /** - * Pattern used to validate a WAL file name see {@link #validateWALFilename(String)} for - * description. 
- */ - private static final Pattern WAL_FILE_NAME_PATTERN = - Pattern.compile("(.+)\\.(\\d+)(\\.[0-9A-Za-z]+)?"); - /** * A WAL file name is of the format: <wal-name>{@link #WAL_FILE_NAME_DELIMITER} * <file-creation-timestamp>[.<suffix>]. provider-name is usually made up of a @@ -295,6 +300,23 @@ public static boolean validateWALFilename(String filename) { return WAL_FILE_NAME_PATTERN.matcher(filename).matches(); } + /** + * Split a WAL filename to get a start time. WALs usually have the time we start writing to them + * with as part of their name, usually the suffix. Sometimes there will be an extra suffix as when + * it is a WAL for the meta table. For example, WALs might look like this + * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the + * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a + * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have + * no timestamp on it. For example the recovered.edits files are WALs but are named in ascending + * order. Here is an example: 0000000000000016310. Allow for this. + * @param name Name of the WAL file. + * @return Timestamp or {@link #NO_TIMESTAMP}. + */ + public static long getTimestamp(String name) { + Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(name); + return matcher.matches() ? Long.parseLong(matcher.group(2)): NO_TIMESTAMP; + } + /** * Construct the directory name for all WALs on a given server. Dir names currently look like this * for WALs: hbase//WALs/kalashnikov.att.net,61634,1486865297088. @@ -444,7 +466,7 @@ public int compare(Path o1, Path o2) { * @return start time */ private static long getTS(Path p) { - return WAL.getTimestamp(p.getName()); + return getTimestamp(p.getName()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 20379fd7fe9e..747b2770d457 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -32,7 +32,6 @@ import org.apache.yetus.audience.InterfaceStability; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import static org.apache.commons.lang3.StringUtils.isNumeric; /** * A Write Ahead Log (WAL) provides service for reading, writing waledits. This interface provides @@ -300,32 +299,4 @@ public String toString() { return this.key + "=" + this.edit; } } - - /** - * Split a WAL filename to get a start time. WALs usually have the time we start writing to them - * as part of their name, usually the suffix. Sometimes there will be an extra suffix as when it - * is a WAL for the meta table. For example, WALs might look like this - * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the - * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a - * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have - * no timestamp on it. For example the recovered.edits files are WALs but are named in ascending - * order. Here is an example: 0000000000000016310. Allow for this. - * @param name Name of the WAL file. - * @return Timestamp or -1. - */ - public static long getTimestamp(String name) { - String [] splits = name.split("\\."); - if (splits.length <= 1) { - return -1; - } - String timestamp = splits[splits.length - 1]; - if (!isNumeric(timestamp)) { - // Its a '.meta' or a '.syncrep' suffix. 
- timestamp = splits[splits.length - 2]; - if (!isNumeric(timestamp)) { - return -1; - } - } - return Long.parseLong(timestamp); - } } From aeae97eb9326464e953dde7fb56367e856d0fb37 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 13 Oct 2020 09:43:56 +0800 Subject: [PATCH 444/769] HBASE-25164 Make ModifyTableProcedure support changing meta replica count (#2513) Signed-off-by: Michael Stack --- .../org/apache/hadoop/hbase/HConstants.java | 13 ++ .../hbase/IntegrationTestMetaReplicas.java | 2 +- .../apache/hadoop/hbase/master/HMaster.java | 60 ++++--- .../hbase/master/MasterMetaBootstrap.java | 111 ------------- .../master/assignment/AssignmentManager.java | 19 ++- .../master/assignment/RegionStateStore.java | 157 +++++++++--------- .../master/procedure/InitMetaProcedure.java | 6 +- .../hadoop/hbase/util/FSTableDescriptors.java | 9 +- .../resources/hbase-webapps/master/table.jsp | 4 +- .../client/AbstractTestRegionLocator.java | 4 +- .../client/MetaWithReplicasTestBase.java | 5 +- .../hbase/client/RegionReplicaTestHelper.java | 8 +- .../TestAsyncAdminWithRegionReplicas.java | 8 +- .../client/TestAsyncMetaRegionLocator.java | 3 +- .../client/TestAsyncTableUseMetaReplicas.java | 3 +- .../hbase/client/TestCleanupMetaReplica.java | 55 ++++++ .../TestCleanupMetaReplicaThroughConfig.java | 69 ++++++++ .../TestFailedMetaReplicaAssigment.java | 47 +++++- .../TestIncreaseMetaReplicaThroughConfig.java | 68 ++++++++ .../hbase/client/TestMasterRegistry.java | 5 +- .../client/TestMetaRegionLocationCache.java | 4 +- .../client/TestMetaWithReplicasBasic.java | 25 +-- .../hbase/client/TestReplicaWithCluster.java | 6 +- .../client/TestZKConnectionRegistry.java | 10 +- .../hbase/master/AlwaysStandByHMaster.java | 2 +- .../TestReadAndWriteRegionInfoFile.java | 2 +- 26 files changed, 410 insertions(+), 295 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 5b4b6fb4bd63..41bf487de055 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1131,7 +1131,20 @@ public enum OperationStatusCode { /** Conf key for enabling meta replication */ public static final String USE_META_REPLICAS = "hbase.meta.replicas.use"; public static final boolean DEFAULT_USE_META_REPLICAS = false; + + /** + * @deprecated Since 2.4.0, will be removed in 4.0.0. Please change the meta replicas number by + * altering meta table, i.e, set a new 'region replication' number and call + * modifyTable. + */ + @Deprecated public static final String META_REPLICAS_NUM = "hbase.meta.replica.count"; + /** + * @deprecated Since 2.4.0, will be removed in 4.0.0. Please change the meta replicas number by + * altering meta table, i.e, set a new 'region replication' number and call + * modifyTable. 
+ */ + @Deprecated public static final int DEFAULT_META_REPLICA_NUM = 1; /** diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java index d906bfd8420c..05e203607f53 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java @@ -53,11 +53,11 @@ public static void setUp() throws Exception { if (util == null) { util = new IntegrationTestingUtility(); } - util.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); util.getConfiguration().setInt( StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); // Make sure there are three servers. util.initializeCluster(3); + HBaseTestingUtility.setReplicas(util.getAdmin(), TableName.META_TABLE_NAME, 3); ZKWatcher zkw = util.getZooKeeperWatcher(); Configuration conf = util.getConfiguration(); String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 9c617bbe7f8e..85ac5e0b0490 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1015,10 +1015,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc status.setStatus("Initializing meta table if this is a new deploy"); InitMetaProcedure initMetaProc = null; // Print out state of hbase:meta on startup; helps debugging. - RegionState rs = this.assignmentManager.getRegionStates(). - getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO); - LOG.info("hbase:meta {}", rs); - if (rs != null && rs.isOffline()) { + if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) { Optional optProc = procedureExecutor.getProcedures().stream() .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny(); initMetaProc = optProc.orElseGet(() -> { @@ -1070,6 +1067,39 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc this.assignmentManager.joinCluster(); // The below depends on hbase:meta being online. this.tableStateManager.start(); + + // for migrating from a version without HBASE-25099, and also for honoring the configuration + // first. + if (conf.get(HConstants.META_REPLICAS_NUM) != null) { + int replicasNumInConf = + conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); + TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME); + if (metaDesc.getRegionReplication() != replicasNumInConf) { + // it is possible that we already have some replicas before upgrading, so we must set the + // region replication number in meta TableDescriptor directly first, without creating a + // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas. 
+ int existingReplicasCount = + assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); + if (existingReplicasCount > metaDesc.getRegionReplication()) { + LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) + .setRegionReplication(existingReplicasCount).build(); + tableDescriptors.update(metaDesc); + } + // check again, and issue a ModifyTableProcedure if needed + if (metaDesc.getRegionReplication() != replicasNumInConf) { + LOG.info( + "The {} config is {} while the replica count in TableDescriptor is {}" + + " for hbase:meta, altering...", + HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); + procedureExecutor.submitProcedure(new ModifyTableProcedure( + procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) + .setRegionReplication(replicasNumInConf).build(), + null, metaDesc, false)); + } + } + } // Below has to happen after tablestatemanager has started in the case where this hbase-2.x // is being started over an hbase-1.x dataset. tablestatemanager runs a migration as part // of its 'start' moving table state from zookeeper to hbase:meta. This migration needs to @@ -1133,13 +1163,6 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc } assignmentManager.checkIfShouldMoveSystemRegionAsync(); - status.setStatus("Assign meta replicas"); - MasterMetaBootstrap metaBootstrap = createMetaBootstrap(); - try { - metaBootstrap.assignMetaReplicas(); - } catch (IOException | KeeperException e){ - LOG.error("Assigning meta replica failed: ", e); - } status.setStatus("Starting quota manager"); initQuotaManager(); if (QuotaUtil.isQuotaEnabled(conf)) { @@ -1294,21 +1317,6 @@ private void initMobCleaner() { getChoreService().scheduleChore(mobFileCompactionChore); } - /** - *

<p> - * Create a {@link MasterMetaBootstrap} instance. - * </p> - * <p> - * Will be overridden in tests. - * </p> - */ - @VisibleForTesting - protected MasterMetaBootstrap createMetaBootstrap() { - // We put this out here in a method so can do a Mockito.spy and stub it out - // w/ a mocked up MasterMetaBootstrap. - return new MasterMetaBootstrap(this); - } - /** * <p>

    * Create a {@link ServerManager} instance. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java deleted file mode 100644 index 0b3476fc9dd5..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import java.io.IOException; -import java.util.List; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Used by the HMaster on startup to split meta logs and assign the meta table. - */ -@InterfaceAudience.Private -class MasterMetaBootstrap { - private static final Logger LOG = LoggerFactory.getLogger(MasterMetaBootstrap.class); - - private final HMaster master; - - public MasterMetaBootstrap(HMaster master) { - this.master = master; - } - - /** - * For assigning hbase:meta replicas only. - * TODO: The way this assign runs, nothing but chance to stop all replicas showing up on same - * server as the hbase:meta region. - */ - void assignMetaReplicas() - throws IOException, InterruptedException, KeeperException { - int numReplicas = master.getConfiguration().getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); - final AssignmentManager assignmentManager = master.getAssignmentManager(); - if (!assignmentManager.isMetaLoaded()) { - throw new IllegalStateException("hbase:meta must be initialized first before we can " + - "assign out its replicas"); - } - ServerName metaServername = MetaTableLocator.getMetaRegionLocation(this.master.getZooKeeper()); - for (int i = 1; i < numReplicas; i++) { - // Get current meta state for replica from zk. 
- RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper(), i); - RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, i); - LOG.debug(hri.getRegionNameAsString() + " replica region state from zookeeper=" + metaState); - if (metaServername.equals(metaState.getServerName())) { - metaState = null; - LOG.info(hri.getRegionNameAsString() + - " old location is same as current hbase:meta location; setting location as null..."); - } - // These assigns run inline. All is blocked till they complete. Only interrupt is shutting - // down hosting server which calls AM#stop. - if (metaState != null && metaState.getServerName() != null) { - // Try to retain old assignment. - assignmentManager.assignAsync(hri, metaState.getServerName()); - } else { - assignmentManager.assignAsync(hri); - } - } - unassignExcessMetaReplica(numReplicas); - } - - private void unassignExcessMetaReplica(int numMetaReplicasConfigured) { - final ZKWatcher zooKeeper = master.getZooKeeper(); - // unassign the unneeded replicas (for e.g., if the previous master was configured - // with a replication of 3 and now it is 2, we need to unassign the 1 unneeded replica) - try { - List metaReplicaZnodes = zooKeeper.getMetaReplicaNodes(); - for (String metaReplicaZnode : metaReplicaZnodes) { - int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaZnode); - if (replicaId >= numMetaReplicasConfigured) { - RegionState r = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId); - LOG.info("Closing excess replica of meta region " + r.getRegion()); - // send a close and wait for a max of 30 seconds - ServerManager.closeRegionSilentlyAndWait(master.getAsyncClusterConnection(), - r.getServerName(), r.getRegion(), 30000); - ZKUtil.deleteNode(zooKeeper, zooKeeper.getZNodePaths().getZNodeForReplica(replicaId)); - } - } - } catch (Exception ex) { - // ignore the exception since we don't want the master to be wedged due to potential - // issues in the cleanup of the extra regions. We can do that cleanup via hbck or manually - LOG.warn("Ignoring exception " + ex); - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index f23d17026f3c..9a88533f3565 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -228,13 +229,18 @@ public void start() throws IOException, KeeperException { // load meta region state ZKWatcher zkw = master.getZooKeeper(); // it could be null in some tests - if (zkw != null) { + if (zkw == null) { + return; + } + List metaZNodes = zkw.getMetaReplicaNodes(); + LOG.debug("hbase:meta replica znodes: {}", metaZNodes); + for (String metaZNode : metaZNodes) { + int replicaId = zkw.getZNodePaths().getMetaReplicaIdFromZNode(metaZNode); // here we are still in the early steps of active master startup. 
There is only one thread(us) // can access AssignmentManager and create region node, so here we do not need to lock the // region node. - RegionState regionState = MetaTableLocator.getMetaRegionState(zkw); - RegionStateNode regionNode = - regionStates.getOrCreateRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO); + RegionState regionState = MetaTableLocator.getMetaRegionState(zkw, replicaId); + RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionState.getRegion()); regionNode.setRegionLocation(regionState.getServerName()); regionNode.setState(regionState.getState()); if (regionNode.getProcedure() != null) { @@ -243,7 +249,10 @@ public void start() throws IOException, KeeperException { if (regionState.getServerName() != null) { regionStates.addRegionToServer(regionNode); } - setMetaAssigned(regionState.getRegion(), regionState.getState() == State.OPEN); + if (RegionReplicaUtil.isDefaultReplica(replicaId)) { + setMetaAssigned(regionState.getRegion(), regionState.getState() == State.OPEN); + } + LOG.debug("Loaded hbase:meta {}", regionNode); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 78f2bb75fe8c..c8b0e351f4e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -64,6 +64,8 @@ import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; @@ -126,24 +128,23 @@ public boolean visit(final Result r) throws IOException { } /** - * Queries META table for the passed region encoded name, - * delegating action upon results to the RegionStateVisitor - * passed as second parameter. + * Queries META table for the passed region encoded name, delegating action upon results to the + * RegionStateVisitor passed as second parameter. * @param regionEncodedName encoded name for the Region we want to query META for. * @param visitor The RegionStateVisitor instance to react over the query results. * @throws IOException If some error occurs while querying META or parsing results. */ public void visitMetaForRegion(final String regionEncodedName, final RegionStateVisitor visitor) - throws IOException { - Result result = MetaTableAccessor. - scanByRegionEncodedName(master.getConnection(), regionEncodedName); + throws IOException { + Result result = + MetaTableAccessor.scanByRegionEncodedName(master.getConnection(), regionEncodedName); if (result != null) { visitMetaEntry(visitor, result); } } private void visitMetaEntry(final RegionStateVisitor visitor, final Result result) - throws IOException { + throws IOException { final RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result); if (rl == null) return; @@ -178,18 +179,18 @@ void updateRegionLocation(RegionStateNode regionStateNode) throws IOException { updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(), regionStateNode.getState()); } else { - long openSeqNum = regionStateNode.getState() == State.OPEN ? 
regionStateNode.getOpenSeqNum() - : HConstants.NO_SEQNUM; + long openSeqNum = regionStateNode.getState() == State.OPEN ? regionStateNode.getOpenSeqNum() : + HConstants.NO_SEQNUM; updateUserRegionLocation(regionStateNode.getRegionInfo(), regionStateNode.getState(), regionStateNode.getRegionLocation(), openSeqNum, // The regionStateNode may have no procedure in a test scenario; allow for this. - regionStateNode.getProcedure() != null ? regionStateNode.getProcedure().getProcId() - : Procedure.NO_PROC_ID); + regionStateNode.getProcedure() != null ? regionStateNode.getProcedure().getProcId() : + Procedure.NO_PROC_ID); } } private void updateMetaLocation(RegionInfo regionInfo, ServerName serverName, State state) - throws IOException { + throws IOException { try { MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName, regionInfo.getReplicaId(), state); @@ -199,8 +200,7 @@ private void updateMetaLocation(RegionInfo regionInfo, ServerName serverName, St } private void updateUserRegionLocation(RegionInfo regionInfo, State state, - ServerName regionLocation, long openSeqNum, - long pid) throws IOException { + ServerName regionLocation, long openSeqNum, long pid) throws IOException { long time = EnvironmentEdgeManager.currentTime(); final int replicaId = regionInfo.getReplicaId(); final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time); @@ -210,7 +210,7 @@ private void updateUserRegionLocation(RegionInfo regionInfo, State state, .append(regionInfo.getEncodedName()).append(", regionState=").append(state); if (openSeqNum >= 0) { Preconditions.checkArgument(state == State.OPEN && regionLocation != null, - "Open region should be on a server"); + "Open region should be on a server"); MetaTableAccessor.addLocation(put, regionLocation, openSeqNum, replicaId); // only update replication barrier for default replica if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID && @@ -223,30 +223,23 @@ private void updateUserRegionLocation(RegionInfo regionInfo, State state, } else if (regionLocation != null) { // Ideally, if no regionLocation, write null to the hbase:meta but this will confuse clients // currently; they want a server to hit. TODO: Make clients wait if no location. 
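The cells written just below key the server and state information by replica-aware qualifiers. As a reading aid, here is a toy illustration of the naming scheme; the "%04X" width is an assumption mirroring RegionInfo.REPLICA_ID_FORMAT, and the getStateColumn helper later in this hunk remains the authoritative source:

public class ReplicaQualifiers {
  // Assumed to match HConstants' meta replica delimiter and RegionInfo.REPLICA_ID_FORMAT.
  private static final String DELIMITER = "_";
  private static final String REPLICA_ID_FORMAT = "%04X";

  // Replica 0 (the primary) keeps the bare qualifier for backwards compatibility;
  // secondaries get a suffixed variant, e.g. state_0001.
  static String qualifier(String base, int replicaId) {
    return replicaId == 0 ? base
      : base + DELIMITER + String.format(REPLICA_ID_FORMAT, replicaId);
  }

  public static void main(String[] args) {
    for (int id = 0; id <= 2; id++) {
      System.out.println("info:" + qualifier("state", id) + " / info:" + qualifier("server", id));
    }
    // Prints info:state / info:server, then info:state_0001 / info:server_0001, and so on.
  }
}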
- put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(CatalogFamilyFormat.getServerNameColumn(replicaId)) - .setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(regionLocation.getServerName())) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(CatalogFamilyFormat.getServerNameColumn(replicaId)) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(regionLocation.getServerName())).build()); info.append(", regionLocation=").append(regionLocation); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(getStateColumn(replicaId)) - .setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(state.name())) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getStateColumn(replicaId)) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name())) + .build()); LOG.info(info.toString()); updateRegionLocation(regionInfo, state, put); } private void updateRegionLocation(RegionInfo regionInfo, State state, Put put) - throws IOException { + throws IOException { try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) { table.put(put); } catch (IOException e) { @@ -319,7 +312,7 @@ private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws } // ============================================================================================ - // Update Region Splitting State helpers + // Update Region Splitting State helpers // ============================================================================================ /** * Splits the region into two in an atomic operation. Offlines the parent region with the @@ -370,7 +363,7 @@ public void splitRegion(RegionInfo parent, RegionInfo splitA, RegionInfo splitB, } // ============================================================================================ - // Update Region Merging State helpers + // Update Region Merging State helpers // ============================================================================================ public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serverName, TableDescriptor htd) throws IOException { @@ -378,7 +371,7 @@ public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serv long time = HConstants.LATEST_TIMESTAMP; List mutations = new ArrayList<>(); List replicationParents = new ArrayList<>(); - for (RegionInfo ri: parents) { + for (RegionInfo ri : parents) { long seqNum = globalScope ? getOpenSeqNumForParentRegion(ri) : -1; // Deletes for merging regions mutations.add(MetaTableAccessor.makeDeleteFromRegionInfo(ri, time)); @@ -438,8 +431,7 @@ public List getMergeRegions(RegionInfo region) throws IOException { * @param connection connection we're using * @param mergeRegion the merged region */ - public void deleteMergeQualifiers(RegionInfo mergeRegion) - throws IOException { + public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException { // NOTE: We are doing a new hbase:meta read here. 
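As a complement to the write path above, the same catalog cells can be read back with nothing but public client API. A minimal sketch, assuming the caller already has the region's hbase:meta row key (for the default replica this is RegionInfo#getRegionName):

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaStatePeek {
  // Returns the serialized assignment state of the primary replica, e.g. "OPEN", or null.
  static String regionState(Connection conn, byte[] metaRowKey) throws IOException {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Result r = meta.get(
        new Get(metaRowKey).addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER));
      byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER);
      return value == null ? null : Bytes.toString(value);
    }
  }
}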
Cell[] cells = getRegionCatalogResult(mergeRegion).rawCells(); if (cells == null || cells.length == 0) { @@ -493,7 +485,7 @@ static Put addMergeRegions(Put put, Collection mergeRegions) throws } // ============================================================================================ - // Delete Region State helpers + // Delete Region State helpers // ============================================================================================ /** * Deletes the specified region. @@ -554,39 +546,52 @@ private Scan getScanForUpdateRegionReplicas(TableName tableName) { public void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount) throws IOException { - Scan scan = getScanForUpdateRegionReplicas(tableName); - List deletes = new ArrayList<>(); - long now = EnvironmentEdgeManager.currentTime(); - try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { - for (;;) { - Result result = scanner.next(); - if (result == null) { - break; - } - RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); - if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { - continue; - } - Delete delete = new Delete(result.getRow()); + if (TableName.isMetaTableName(tableName)) { + ZKWatcher zk = master.getZooKeeper(); + try { for (int i = newReplicaCount; i < oldReplicaCount; i++) { - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), now); - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), now); - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), - now); - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), - now); - delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getRegionStateColumn(i), - now); + ZKUtil.deleteNode(zk, zk.getZNodePaths().getZNodeForReplica(i)); } - deletes.add(delete); + } catch (KeeperException e) { + throw new IOException(e); + } + } else { + Scan scan = getScanForUpdateRegionReplicas(tableName); + List deletes = new ArrayList<>(); + long now = EnvironmentEdgeManager.currentTime(); + try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; + } + RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result); + if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) { + continue; + } + Delete delete = new Delete(result.getRow()); + for (int i = newReplicaCount; i < oldReplicaCount; i++) { + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), + now); + delete.addColumns(HConstants.CATALOG_FAMILY, + CatalogFamilyFormat.getRegionStateColumn(i), now); + } + deletes.add(delete); + } + debugLogMutations(deletes); + metaTable.delete(deletes); } - debugLogMutations(deletes); - metaTable.delete(deletes); } } // ========================================================================== - // Table Descriptors helpers + // Table Descriptors helpers // ========================================================================== private boolean hasGlobalReplicationScope(TableName tableName) throws 
IOException { return hasGlobalReplicationScope(getDescriptor(tableName)); @@ -605,7 +610,7 @@ private TableDescriptor getDescriptor(TableName tableName) throws IOException { } // ========================================================================== - // Region State + // Region State // ========================================================================== /** @@ -613,29 +618,29 @@ private TableDescriptor getDescriptor(TableName tableName) throws IOException { * @return the region state, or null if unknown. */ public static State getRegionState(final Result r, RegionInfo regionInfo) { - Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, - getStateColumn(regionInfo.getReplicaId())); + Cell cell = + r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(regionInfo.getReplicaId())); if (cell == null || cell.getValueLength() == 0) { return null; } - String state = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); + String state = + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); try { return State.valueOf(state); } catch (IllegalArgumentException e) { - LOG.warn("BAD value {} in hbase:meta info:state column for region {} , " + - "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE", - state, regionInfo.getEncodedName()); + LOG.warn( + "BAD value {} in hbase:meta info:state column for region {} , " + + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE", + state, regionInfo.getEncodedName()); return null; } } private static byte[] getStateColumn(int replicaId) { - return replicaId == 0 - ? HConstants.STATE_QUALIFIER - : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.STATE_QUALIFIER : + Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } private static void debugLogMutations(List mutations) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java index f158452296c8..e92fc110aba2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java @@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -83,9 +82,8 @@ private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) t // created here in bootstrap and it'll need to be cleaned up. Better to // not make it in first place. Turn off block caching for bootstrap. // Enable after. 
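The writeFsLayout change below drops the bootstrap-time hook that baked the configured replica count into the hbase:meta descriptor; the count is now adjusted after startup through the ordinary alter path, which is also what the repeated HBaseTestingUtility.setReplicas calls in the test changes further down appear to rely on. Since that helper's body is not part of this patch, here is a hedged sketch of what it presumably amounts to:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class ReplicaTuning {
  // Adjust a table's region replication through the normal Admin API; after this change
  // hbase:meta is handled the same way as a user table.
  static void setRegionReplication(Admin admin, TableName table, int replicas) throws IOException {
    TableDescriptor current = admin.getDescriptor(table);
    if (current.getRegionReplication() != replicas) {
      admin.modifyTable(
        TableDescriptorBuilder.newBuilder(current).setRegionReplication(replicas).build());
    }
  }
}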
- TableDescriptor metaDescriptor = FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, - rootDir, builder -> builder.setRegionReplication( - conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM))); + TableDescriptor metaDescriptor = + FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir); HRegion .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null) .close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index afff1c139311..937069f92be0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.lang3.NotImplementedException; @@ -121,20 +120,16 @@ public FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean @VisibleForTesting public static void tryUpdateMetaTableDescriptor(Configuration conf) throws IOException { tryUpdateAndGetMetaTableDescriptor(conf, CommonFSUtils.getCurrentFileSystem(conf), - CommonFSUtils.getRootDir(conf), null); + CommonFSUtils.getRootDir(conf)); } public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration conf, - FileSystem fs, Path rootdir, - Function metaObserver) throws IOException { + FileSystem fs, Path rootdir) throws IOException { // see if we already have meta descriptor on fs. Write one if not. try { return getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME); } catch (TableInfoMissingException e) { TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf); - if (metaObserver != null) { - builder = metaObserver.apply(builder); - } TableDescriptor td = builder.build(); LOG.info("Creating new hbase:meta table descriptor {}", td); TableName tableName = td.getTableName(); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 25b5979ae8c8..b800e72f37b7 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -155,8 +155,8 @@ Table table = master.getConnection().getTable(TableName.valueOf(fqtn)); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean readOnly = !InfoServer.canUserModifyUI(request, getServletContext(), conf); - int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); + int numMetaReplicas = + master.getTableDescriptors().get(TableName.META_TABLE_NAME).getRegionReplication(); Map frags = null; if (showFragmentation) { frags = FSUtils.getTableFragmentation(master); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java index 89f287bed1b3..09a081317517 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java @@ -47,8 +47,8 @@ public abstract class 
AbstractTestRegionLocator { protected static byte[][] SPLIT_KEYS; protected static void startClusterAndCreateTable() throws Exception { - UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, REGION_REPLICATION); UTIL.startMiniCluster(3); + HBaseTestingUtility.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, REGION_REPLICATION); TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(REGION_REPLICATION) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); @@ -59,7 +59,7 @@ protected static void startClusterAndCreateTable() throws Exception { UTIL.getAdmin().createTable(td, SPLIT_KEYS); UTIL.waitTableAvailable(TABLE_NAME); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration())) { + ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } UTIL.getAdmin().balancerSwitch(false, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java index 78e3e541e895..8cfad19f66dc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java @@ -26,9 +26,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartMiniClusterOption; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; @@ -56,12 +56,13 @@ public class MetaWithReplicasTestBase { protected static void startCluster() throws Exception { TEST_UTIL.getConfiguration().setInt("zookeeper.session.timeout", 30000); - TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TEST_UTIL.getConfiguration() .setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); StartMiniClusterOption option = StartMiniClusterOption.builder().numAlwaysStandByMasters(1) .numMasters(1).numRegionServers(REGIONSERVERS_COUNT).build(); TEST_UTIL.startMiniCluster(option); + Admin admin = TEST_UTIL.getAdmin(); + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 3); AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); Set sns = new HashSet(); ServerName hbaseMetaServerName = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java index 8e562bd984c7..a2466a5cd7fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java @@ -20,13 +20,13 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; -import 
org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLocations; @@ -44,10 +44,10 @@ private RegionReplicaTestHelper() { // waits for all replicas to have region location static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtility util, - ConnectionRegistry registry) { + ConnectionRegistry registry) throws IOException { Configuration conf = util.getConfiguration(); - int regionReplicaCount = util.getConfiguration().getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); + int regionReplicaCount = + util.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); Waiter.waitFor(conf, conf.getLong("hbase.client.sync.wait.timeout.msec", 60000), 200, true, new ExplainingPredicate() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index 50111f7eddd5..3596f1c0025d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -28,6 +28,7 @@ import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; @@ -51,12 +52,11 @@ public class TestAsyncAdminWithRegionReplicas extends TestAsyncAdminBase { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TestAsyncAdminBase.setUpBeforeClass(); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration())) { - RegionReplicaTestHelper - .waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration())) { + RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index 003bef33a994..733787773aa0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -22,7 +22,6 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; @@ -50,8 +49,8 @@ public class TestAsyncMetaRegionLocator { @BeforeClass public static void setUp() throws Exception { - TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TEST_UTIL.startMiniCluster(3); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); TEST_UTIL.waitUntilNoRegionsInTransition(); REGISTRY = 
ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java index 3485955b66a5..47d6ddb42947 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertArrayEquals; + import java.io.IOException; import java.util.Optional; import java.util.concurrent.ExecutionException; @@ -86,11 +87,11 @@ public void preScannerOpen(ObserverContext c, Scan @BeforeClass public static void setUp() throws Exception { Configuration conf = UTIL.getConfiguration(); - conf.setInt(HConstants.META_REPLICAS_NUM, 3); conf.setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, FailPrimaryMetaScanCp.class.getName()); UTIL.startMiniCluster(3); + HBaseTestingUtility.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); try (ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf)) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java new file mode 100644 index 000000000000..15815e40d6b9 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MiscTests.class, MediumTests.class }) +public class TestCleanupMetaReplica extends MetaWithReplicasTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCleanupMetaReplica.class); + + @BeforeClass + public static void setUp() throws Exception { + startCluster(); + } + + @Test + public void testReplicaCleanup() throws Exception { + ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + List metaReplicaZnodes = zkw.getMetaReplicaNodes(); + assertEquals(3, metaReplicaZnodes.size()); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 1); + metaReplicaZnodes = zkw.getMetaReplicaNodes(); + assertEquals(1, metaReplicaZnodes.size()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java new file mode 100644 index 000000000000..66f2df6bbc8c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Make sure we will honor the {@link HConstants#META_REPLICAS_NUM}. 
+ */
+@Category({ MiscTests.class, MediumTests.class })
+public class TestCleanupMetaReplicaThroughConfig extends MetaWithReplicasTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestCleanupMetaReplicaThroughConfig.class);
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    startCluster();
+  }
+
+  @Test
+  public void testReplicaCleanup() throws Exception {
+    ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
+    List metaReplicaZnodes = zkw.getMetaReplicaNodes();
+    assertEquals(3, metaReplicaZnodes.size());
+
+    final HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
+    master.stop("Restarting");
+    TEST_UTIL.waitFor(30000, () -> master.isStopped());
+    TEST_UTIL.getMiniHBaseCluster().getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 1);
+
+    JVMClusterUtil.MasterThread newMasterThread = TEST_UTIL.getMiniHBaseCluster().startMaster();
+    final HMaster newMaster = newMasterThread.getMaster();
+
+    // wait until new master finished meta replica assignment logic
+    TEST_UTIL.waitFor(30000, () -> newMaster.getMasterQuotaManager() != null);
+    TEST_UTIL.waitFor(30000,
+      () -> TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes().size() == 1);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
index 0c26d7934ffd..18235ebbcf96 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
@@ -22,18 +22,23 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
-import java.util.concurrent.Future;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.junit.AfterClass;
@@ -53,7 +58,8 @@ public class TestFailedMetaReplicaAssigment {
   @BeforeClass
   public static void setUp() throws Exception {
-    // using our rigged master, to force a failed meta replica assignment
+    // using our rigged master, to force a failed meta replica assignment when starting up the master
+    // this test can be removed once we remove the HConstants.META_REPLICAS_NUM config.
Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt(HConstants.META_REPLICAS_NUM, 3); StartMiniClusterOption option = StartMiniClusterOption.builder().numAlwaysStandByMasters(1) @@ -100,9 +106,20 @@ public void testFailedReplicaAssignment() throws InterruptedException { } public static class BrokenTransitRegionStateProcedure extends TransitRegionStateProcedure { - protected BrokenTransitRegionStateProcedure() { + + public BrokenTransitRegionStateProcedure() { super(null, null, null, false, TransitionType.ASSIGN); } + + public BrokenTransitRegionStateProcedure(MasterProcedureEnv env, RegionInfo hri) { + super(env, hri, null, false, TransitionType.ASSIGN); + } + + @Override + protected Procedure[] execute(MasterProcedureEnv env) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throw new ProcedureSuspendedException("Never end procedure!"); + } } public static class BrokenMetaReplicaMaster extends HMaster { @@ -124,12 +141,24 @@ public BrokenMasterMetaAssignmentManager(final MasterServices master) { this.master = master; } - public Future assignAsync(RegionInfo regionInfo, ServerName sn) throws IOException { - RegionStateNode regionNode = getRegionStates().getOrCreateRegionStateNode(regionInfo); - if (regionNode.getRegionInfo().getReplicaId() == 2) { - regionNode.setProcedure(new BrokenTransitRegionStateProcedure()); + @Override + public TransitRegionStateProcedure[] createAssignProcedures(List hris) { + List procs = new ArrayList<>(); + for (RegionInfo hri : hris) { + if (hri.isMetaRegion() && hri.getReplicaId() == 2) { + RegionStateNode regionNode = getRegionStates().getOrCreateRegionStateNode(hri); + regionNode.lock(); + try { + procs.add(regionNode.setProcedure(new BrokenTransitRegionStateProcedure( + master.getMasterProcedureExecutor().getEnvironment(), hri))); + } finally { + regionNode.unlock(); + } + } else { + procs.add(super.createAssignProcedures(Collections.singletonList(hri))[0]); + } } - return super.assignAsync(regionInfo, sn); + return procs.toArray(new TransitRegionStateProcedure[0]); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java new file mode 100644 index 000000000000..77a2a0c4423e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Make sure we will honor the {@link HConstants#META_REPLICAS_NUM}. And also test upgrading.
+ */
+@Category({ MiscTests.class, MediumTests.class })
+public class TestIncreaseMetaReplicaThroughConfig extends MetaWithReplicasTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestIncreaseMetaReplicaThroughConfig.class);
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    startCluster();
+  }
+
+  @Test
+  public void testUpgradeAndIncreaseReplicaCount() throws Exception {
+    HMaster oldMaster = TEST_UTIL.getMiniHBaseCluster().getMaster();
+    TableDescriptors oldTds = oldMaster.getTableDescriptors();
+    TableDescriptor oldMetaTd = oldTds.get(TableName.META_TABLE_NAME);
+    assertEquals(3, oldMetaTd.getRegionReplication());
+    // force update the replica count to 1 and then kill the master, to simulate that when upgrading,
+    // we have no region replication in meta table descriptor but we actually have meta region
+    // replicas
+    oldTds.update(TableDescriptorBuilder.newBuilder(oldMetaTd).setRegionReplication(1).build());
+    oldMaster.stop("Restarting");
+    TEST_UTIL.waitFor(30000, () -> oldMaster.isStopped());
+
+    // increase replica count to 5 through Configuration
+    TEST_UTIL.getMiniHBaseCluster().getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 5);
+    TEST_UTIL.getMiniHBaseCluster().startMaster();
+    TEST_UTIL.waitFor(30000,
+      () -> TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes().size() == 5);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
index ba875c5b95fc..359ad61c4b79 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -36,6 +35,7 @@
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -45,6 +45,7 @@
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 @Category({ MediumTests.class, ClientTests.class })
@@ -57,10 +58,10 @@ public class TestMasterRegistry {
   @BeforeClass
   public static void setUp() throws Exception {
-    TEST_UTIL.getConfiguration().setInt(META_REPLICAS_NUM, 3);
StartMiniClusterOption.Builder builder = StartMiniClusterOption.builder(); builder.numMasters(3).numRegionServers(3); TEST_UTIL.startMiniCluster(builder.build()); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index abaf0920ce10..d42c1240f9ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; + import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MetaRegionLocationCache; import org.apache.hadoop.hbase.master.RegionState; @@ -57,8 +59,8 @@ public class TestMetaRegionLocationCache { @BeforeClass public static void setUp() throws Exception { - TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TEST_UTIL.startMiniCluster(3); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); REGISTRY = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); TEST_UTIL.getAdmin().balancerSwitch(false, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java index 91754eb35b73..8ffbe6bb47fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java @@ -20,17 +20,15 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -82,27 +80,6 @@ public void testZookeeperNodesForReplicas() throws Exception { } } - @Test - public void testReplicaCleanup() throws Exception { - ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); - List metaReplicaZnodes = zkw.getMetaReplicaNodes(); - assertEquals(3, metaReplicaZnodes.size()); - - final HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); - master.stop("Restarting"); - TEST_UTIL.waitFor(30000, () -> master.isStopped()); - 
TEST_UTIL.getMiniHBaseCluster().getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 1); - - JVMClusterUtil.MasterThread newMasterThread = TEST_UTIL.getMiniHBaseCluster().startMaster(); - final HMaster newMaster = newMasterThread.getMaster(); - - //wait until new master finished meta replica assignment logic - TEST_UTIL.waitFor(30000, () -> newMaster.getMasterQuotaManager() != null); - zkw = TEST_UTIL.getZooKeeperWatcher(); - metaReplicaZnodes = zkw.getMetaReplicaNodes(); - assertEquals(1, metaReplicaZnodes.size()); - - } @Test public void testAccessingUnknownTables() throws Exception { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index 3215ea8b64a7..491612c6be95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -249,9 +249,6 @@ public static void beforeClass() throws Exception { HTU.getConfiguration().setInt("hbase.client.primaryCallTimeout.get", 1000000); HTU.getConfiguration().setInt("hbase.client.primaryCallTimeout.scan", 1000000); - // Enable meta replica at server side - HTU.getConfiguration().setInt("hbase.meta.replica.count", 2); - // Make sure master does not host system tables. HTU.getConfiguration().set("hbase.balancer.tablesOnMaster", "none"); @@ -263,6 +260,9 @@ public static void beforeClass() throws Exception { META_SCAN_TIMEOUT_IN_MILLISEC * 1000); HTU.startMiniCluster(NB_SERVERS); + // Enable meta replica at server side + HBaseTestingUtility.setReplicas(HTU.getAdmin(), TableName.META_TABLE_NAME, 2); + HTU.getHBaseCluster().startMaster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 137cb28573a3..427222f8e40c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; @@ -25,6 +24,7 @@ import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.concurrent.ExecutionException; import java.util.stream.IntStream; @@ -62,13 +62,9 @@ public class TestZKConnectionRegistry { @BeforeClass public static void setUp() throws Exception { - TEST_UTIL.getConfiguration().setInt(META_REPLICAS_NUM, 3); TEST_UTIL.startMiniCluster(3); - Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - // make sure that we do not depend on this config when getting locations for meta replicas, see - // HBASE-21658. 
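The HBASE-21658 note above is the client-side half of this change: connection registries no longer consult META_REPLICAS_NUM when resolving meta replica locations. From application code the replica locations can be listed with the public RegionLocator API; a small self-contained example (cluster address comes from the usual client configuration):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaReplicaLocations {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // One location per replica; the count follows the hbase:meta table descriptor,
      // not a client-side configuration value.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println("replica " + loc.getRegion().getReplicaId() + " -> " + loc.getServerName());
      }
    }
  }
}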
- conf.setInt(META_REPLICAS_NUM, 1); - REGISTRY = new ZKConnectionRegistry(conf); + HBaseTestingUtility.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java index 85eac4014f08..3d36db71242a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java @@ -64,10 +64,10 @@ boolean blockUntilBecomingActiveMaster(int checkInterval, MonitoredTask startupS if (MasterAddressTracker.getMasterAddress(watcher) != null) { clusterHasActiveMaster.set(true); } - Threads.sleepWithoutInterrupt(100); } catch (IOException e) { // pass, we will get notified when some other active master creates the znode. } + Threads.sleepWithoutInterrupt(1000); } catch (KeeperException e) { master.abort("Received an unexpected KeeperException, aborting", e); return false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java index 7d6c55b5c908..8754172ba03a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java @@ -71,7 +71,7 @@ public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedExce RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO; // Create a region. That'll write the .regioninfo file. FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR); - FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR, null); + FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR); HRegion r = HBaseTestingUtility.createRegionAndWAL(ri, ROOT_DIR, CONF, fsTableDescriptors.get(TableName.META_TABLE_NAME)); // Get modtime on the file. From 84b5155c8d419a165d81c17498424b15494f0f5a Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 13 Oct 2020 09:44:18 +0800 Subject: [PATCH 445/769] HBASE-25169 Update documentation about meta region replica (#2528) Signed-off-by: Michael Stack --- src/main/asciidoc/_chapters/architecture.adoc | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc index bc4af05b4afc..7cf151d2892c 100644 --- a/src/main/asciidoc/_chapters/architecture.adoc +++ b/src/main/asciidoc/_chapters/architecture.adoc @@ -2753,8 +2753,6 @@ See the above HDFS Architecture link for more information. [[arch.timelineconsistent.reads]] == Timeline-consistent High Available Reads -NOTE: The current <> does not work well with region replica, so this feature maybe broken. Use it with caution. - [[casestudies.timelineconsistent.intro]] === Introduction @@ -2920,7 +2918,7 @@ Instead you can change the number of region replicas per table to increase or de hbase.regionserver.meta.storefile.refresh.period 300000 - The period (in milliseconds) for refreshing the store files for the hbase:meta tables secondary regions. 0 means this feature is disabled. 
Secondary regions sees new files (from flushes and compactions) from primary once the secondary region refreshes the list of files in the region (there is no notification mechanism). But too frequent refreshes might cause extra Namenode pressure. If the files cannot be refreshed for longer than HFile TTL (hbase.master.hfilecleaner.ttl) the requests are rejected. Configuring HFile TTL to a larger value is also recommended with this setting. This should be a non-zero number if meta replicas are enabled (via hbase.meta.replica.count set to greater than 1).
+ The period (in milliseconds) for refreshing the store files for the hbase:meta table's secondary regions. 0 means this feature is disabled. Secondary regions see new files (from flushes and compactions) from the primary once the secondary region refreshes the list of files in the region (there is no notification mechanism). But too frequent refreshes might cause extra Namenode pressure. If the files cannot be refreshed for longer than HFile TTL (hbase.master.hfilecleaner.ttl) the requests are rejected. Configuring HFile TTL to a larger value is also recommended with this setting. This should be a non-zero number if meta replicas are enabled.
@@ -2953,15 +2951,6 @@ Instead you can change the number of region replicas per table to increase or de
 The period (in milliseconds) to keep store files in the archive folder before deleting them from the file system.
-
- hbase.meta.replica.count
- 3
-
- Region replication count for the meta regions. Defaults to 1.
-
-
-
 hbase.region.replica.storefile.refresh.memstore.multiplier
 4
From dfe68819af4a595090cad4ece8ca5716edda1a12 Mon Sep 17 00:00:00 2001
From: Nick Dimiduk
Date: Mon, 12 Oct 2020 19:18:47 -0700
Subject: [PATCH 446/769] "HBASE-25144 Add Hadoop-3.3.0 to personality hadoopcheck" try again (#2536)

Due to HBASE-23834, HBASE-19256, HADOOP-16152, HBase 2.2.x and 2.3.x cannot run on Hadoop 3.3.0, or any Hadoop version that has upgraded to Jetty 9.4.

Signed-off-by: Sean Busbey
Signed-off-by: stack
Signed-off-by: Guanghao Zhang
Signed-off-by: Duo Zhang
---
 dev-support/hbase-personality.sh | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 95bbd44bdadc..9b39ead6bc07 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -609,8 +609,15 @@ function hadoopcheck_rebuild
     else
       hbase_hadoop3_versions="3.0.3 3.1.1 3.1.2"
     fi
+  elif [[ "${PATCH_BRANCH}" = branch-2.2 ]] || [[ "${PATCH_BRANCH}" = branch-2.3 ]]; then
+    yetus_info "Setting Hadoop 3 versions to test based on branch-2.2/branch-2.3 rules"
+    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
+      hbase_hadoop3_versions="3.1.2 3.2.1"
+    else
+      hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1"
+    fi
   else
-    yetus_info "Setting Hadoop 3 versions to test based on branch-2.2+/master/feature branch rules"
+    yetus_info "Setting Hadoop 3 versions to test based on branch-2.4+/master/feature branch rules"
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
       hbase_hadoop3_versions="3.1.2 3.2.1 3.3.0"
     else
From a0998cfb6ab13778e1feaa9234d8afe29f7f0626 Mon Sep 17 00:00:00 2001
From: Sean Busbey
Date: Thu, 8 Oct 2020 10:02:27 -0500
Subject: [PATCH 447/769] HBASE-14067 bundle ruby files for hbase shell into a jar.
* removes some cruft from the hbase-shell pom that appears to be from copying the hbase-server pom long ago
* puts the ruby scripts into the hbase-shell jar following the guide from jruby for packaging
* removes hard coding the location of the implementation scripts from our runtime
* removes hard coding the load path for the implementation from the test code (leaves hard coding the test code location)
* provides a workaround for a name conflict between our shell and the ruby stdlib shell.

closes #2515

Signed-off-by: Michael Stack
---
 bin/hbase | 13 +-
 bin/hirb.rb | 223 +----------------
 .../src/main/assembly/client-components.xml | 7 -
 .../src/main/assembly/components.xml | 7 -
 hbase-shell/pom.xml | 54 +---
 hbase-shell/src/main/ruby/hbase_shell.rb | 24 ++
 hbase-shell/src/main/ruby/jar-bootstrap.rb | 235 ++++++++++++++++++
 .../hbase/client/AbstractTestShell.java | 38 ++-
 .../hadoop/hbase/client/TestAdminShell.java | 8 +-
 .../hadoop/hbase/client/TestAdminShell2.java | 8 +-
 .../hadoop/hbase/client/TestQuotasShell.java | 8 +-
 .../hadoop/hbase/client/TestRSGroupShell.java | 8 +-
 .../hbase/client/TestReplicationShell.java | 8 +-
 .../apache/hadoop/hbase/client/TestShell.java | 10 +-
 .../hbase/client/TestShellNoCluster.java | 13 +-
 .../hadoop/hbase/client/TestTableShell.java | 8 +-
 .../client/rsgroup/TestShellRSGroups.java | 8 +-
 .../src/test/ruby/hbase/admin2_test.rb | 2 +-
 hbase-shell/src/test/ruby/hbase/admin_test.rb | 2 +-
 .../hbase/list_regions_test_no_cluster.rb | 2 +-
 .../src/test/ruby/hbase/quotas_test.rb | 2 +-
 .../test/ruby/hbase/quotas_test_no_cluster.rb | 2 +-
 .../test/ruby/hbase/replication_admin_test.rb | 2 +-
 .../test/ruby/hbase/security_admin_test.rb | 2 +-
 .../ruby/hbase/test_connection_no_cluster.rb | 2 +-
 .../hbase/visibility_labels_admin_test.rb | 2 +-
 .../src/test/ruby/shell/commands_test.rb | 2 +-
 .../src/test/ruby/shell/converter_test.rb | 4 +-
 .../src/test/ruby/shell/list_locks_test.rb | 2 +-
 .../test/ruby/shell/list_procedures_test.rb | 2 +-
 .../test/ruby/shell/noninteractive_test.rb | 2 +-
 .../src/test/ruby/shell/rsgroup_shell_test.rb | 2 +-
 hbase-shell/src/test/ruby/shell/shell_test.rb | 2 +-
 hbase-shell/src/test/ruby/test_helper.rb | 6 +-
 hbase-shell/src/test/ruby/tests_runner.rb | 3 +
 35 files changed, 369 insertions(+), 354 deletions(-)
 create mode 100644 hbase-shell/src/main/ruby/hbase_shell.rb
 create mode 100644 hbase-shell/src/main/ruby/jar-bootstrap.rb

diff --git a/bin/hbase b/bin/hbase
index dd6cfeef644f..d2307c50c33a 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -509,13 +509,22 @@ fi
 # figure out which class to run
 if [ "$COMMAND" = "shell" ] ; then
   #find the hbase ruby sources
+  # assume we are in a binary install if lib/ruby exists
   if [ -d "$HBASE_HOME/lib/ruby" ]; then
-    HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/lib/ruby"
+    # We want jruby to consume these things rather than our bootstrap script;
+    # jruby will look for the env variable 'JRUBY_OPTS'.
+ JRUBY_OPTS="${JRUBY_OPTS} -X+O" + export JRUBY_OPTS + # hbase-shell.jar contains a 'jar-bootstrap.rb' + # for more info see + # https://github.com/jruby/jruby/wiki/StandaloneJarsAndClasses#standalone-executable-jar-files + CLASS="org.jruby.JarBootstrapMain" + # otherwise assume we are running in a source checkout else HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-shell/src/main/ruby" + CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/hbase-shell/src/main/ruby/jar-bootstrap.rb" fi HBASE_OPTS="$HBASE_OPTS $HBASE_SHELL_OPTS" - CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb" elif [ "$COMMAND" = "hbck" ] ; then # Look for the -j /path/to/HBCK2.jar parameter. Else pass through to hbck. case "${1}" in diff --git a/bin/hirb.rb b/bin/hirb.rb index 7b1b8f172c25..12353ca1a0ec 100644 --- a/bin/hirb.rb +++ b/bin/hirb.rb @@ -1,5 +1,3 @@ -# -# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -15,217 +13,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# File passed to org.jruby.Main by bin/hbase. Pollutes jirb with hbase imports -# and hbase commands and then loads jirb. Outputs a banner that tells user -# where to find help, shell version, and loads up a custom hirb. -# -# In noninteractive mode, runs commands from stdin until completion or an error. -# On success will exit with status 0, on any problem will exit non-zero. Callers -# should only rely on "not equal to 0", because the current error exit code of 1 -# will likely be updated to diffentiate e.g. invalid commands, incorrect args, -# permissions, etc. - -# TODO: Interrupt a table creation or a connection to a bad master. Currently -# has to time out. Below we've set down the retries for rpc and hbase but -# still can be annoying (And there seem to be times when we'll retry for -# ever regardless) -# TODO: Add support for listing and manipulating catalog tables, etc. -# TODO: Encoding; need to know how to go from ruby String to UTF-8 bytes - -# Run the java magic include and import basic HBase types that will help ease -# hbase hacking. -include Java - -# Some goodies for hirb. Should these be left up to the user's discretion? -require 'irb/completion' -require 'pathname' - -# Add the directory names in hbase.jruby.sources commandline option -# to the ruby load path so I can load up my HBase ruby modules -sources = java.lang.System.getProperty('hbase.ruby.sources') -$LOAD_PATH.unshift Pathname.new(sources) - -# -# FIXME: Switch args processing to getopt -# -# See if there are args for this shell. If any, read and then strip from ARGV -# so they don't go through to irb. 
Output shell 'usage' if user types '--help' -cmdline_help = <hbase-config.cmd - - - ${project.basedir}/../hbase-shell/src/main/ruby - lib/ruby - 0644 - 0755 - ${project.basedir}/../hbase-server/target/native diff --git a/hbase-assembly/src/main/assembly/components.xml b/hbase-assembly/src/main/assembly/components.xml index aaa6a831ad59..3e1394e7d5b1 100644 --- a/hbase-assembly/src/main/assembly/components.xml +++ b/hbase-assembly/src/main/assembly/components.xml @@ -69,13 +69,6 @@ **/*.cmd - - - ${project.basedir}/../hbase-shell/src/main/ruby - lib/ruby - 0644 - 0755 - ${project.basedir}/../hbase-server/target/hbase-webapps diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index b6d2e30388a8..63db97719c67 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -30,15 +30,9 @@ Apache HBase - Shell Shell for HBase - - - ${project.build.directory} - - hbase-webapps/** - + src/main/ruby @@ -50,38 +44,15 @@ - - - - maven-assembly-plugin - - true - - org.apache.maven.plugins maven-jar-plugin - org/apache/hadoop/hbase/mapreduce/Driver + org.jruby.JarBootstrapMain - - - org/apache/jute/** - org/apache/zookeeper/** - **/*.jsp - hbase-site.xml - hdfs-site.xml - log4j.properties - mapred-queues.xml - mapred-site.xml - @@ -89,27 +60,6 @@ org.apache.maven.plugins maven-source-plugin - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-jamon - ${project.build.directory}/generated-sources/java - - - - - net.revelc.code diff --git a/hbase-shell/src/main/ruby/hbase_shell.rb b/hbase-shell/src/main/ruby/hbase_shell.rb new file mode 100644 index 000000000000..e5e85ab68208 --- /dev/null +++ b/hbase-shell/src/main/ruby/hbase_shell.rb @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Ruby has a stdlib named 'shell' so using "require 'shell'" does not +# work if our shell implementation is not on the local filesystem. +# this is the absolute path to our shell implementation when packaged +# in a jar. The level of indirection provided by this file lets things +# still behave the same as in earlier releases if folks unpackage the +# jar contents onto the local filesystem if they need that for some +# other reason. +require 'uri:classloader:/shell.rb' diff --git a/hbase-shell/src/main/ruby/jar-bootstrap.rb b/hbase-shell/src/main/ruby/jar-bootstrap.rb new file mode 100644 index 000000000000..de602bf551d1 --- /dev/null +++ b/hbase-shell/src/main/ruby/jar-bootstrap.rb @@ -0,0 +1,235 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File passed to org.jruby.Main by bin/hbase. Pollutes jirb with hbase imports +# and hbase commands and then loads jirb. Outputs a banner that tells the user +# where to find help, shell version, and loads up a custom hirb. +# +# In noninteractive mode, runs commands from stdin until completion or an error. +# On success will exit with status 0, on any problem will exit non-zero. Callers +# should only rely on "not equal to 0", because the current error exit code of 1 +# will likely be updated to differentiate e.g. invalid commands, incorrect args, +# permissions, etc. + +# TODO: Interrupt a table creation or a connection to a bad master. Currently +# has to time out. Below we've set down the retries for rpc and hbase but +# still can be annoying (And there seem to be times when we'll retry for +# ever regardless) +# TODO: Add support for listing and manipulating catalog tables, etc. +# TODO: Encoding; need to know how to go from ruby String to UTF-8 bytes + +# Run the java magic include and import basic HBase types that will help ease +# hbase hacking. +include Java + +# Some goodies for hirb. Should these be left up to the user's discretion? +require 'irb/completion' +require 'pathname' + +# Add the directory names in the hbase.ruby.sources commandline option +# to the ruby load path so I can load up my HBase ruby modules +# in case we are trying to get them out of source instead of jar +# packaging. +sources = java.lang.System.getProperty('hbase.ruby.sources') +unless sources.nil? + $LOAD_PATH.unshift Pathname.new(sources) +end + +# +# FIXME: Switch args processing to getopt +# +# See if there are args for this shell. If any, read and then strip from ARGV +# so they don't go through to irb. Output shell 'usage' if user types '--help' +cmdline_help = < loadPaths = new ArrayList<>(2); - loadPaths.add("src/main/ruby"); loadPaths.add("src/test/ruby"); jruby.setLoadPaths(loadPaths); jruby.put("$TEST_CLUSTER", TEST_UTIL); @@ -65,6 +71,34 @@ protected static void setUpJRubyRuntime() { System.setProperty("jruby.native.verbose", "true"); } + /** + * @return comma separated list of ruby script names for tests + */ + protected String getIncludeList() { + return ""; + } + + /** + * @return comma separated list of ruby script names for tests to skip + */ + protected String getExcludeList() { + return ""; + } + + @Test + public void testRunShellTests() throws IOException { + final String tests = getIncludeList(); + final String excludes = getExcludeList(); + if (!tests.isEmpty()) { + System.setProperty("shell.test.include", tests); + } + if (!excludes.isEmpty()) { + System.setProperty("shell.test.exclude", excludes); + } + LOG.info("Starting ruby tests. 
includes: {} excludes: {}", tests, excludes); + jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + } + @BeforeClass public static void setUpBeforeClass() throws Exception { setUpConfig(); diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java index 1835d88dc5cf..7cfd603ddb8e 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell.java @@ -33,10 +33,8 @@ public class TestAdminShell extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdminShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "admin_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "admin_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell2.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell2.java index e2dadd059346..b94a579ea322 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell2.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell2.java @@ -33,10 +33,8 @@ public class TestAdminShell2 extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdminShell2.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "admin2_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "admin2_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestQuotasShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestQuotasShell.java index 482bf0f46bf6..f2bb06f7369d 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestQuotasShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestQuotasShell.java @@ -33,10 +33,8 @@ public class TestQuotasShell extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestQuotasShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "quotas_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "quotas_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestRSGroupShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestRSGroupShell.java index f26f9f53375b..a2bc6a47c120 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestRSGroupShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestRSGroupShell.java @@ -47,10 +47,8 @@ public static void setUpBeforeClass() throws Exception { setUpJRubyRuntime(); } - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "rsgroup_shell_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String 
getIncludeList() { + return "rsgroup_shell_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java index ca371e145b98..146a73fa0e33 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java @@ -33,10 +33,8 @@ public class TestReplicationShell extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestReplicationShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "replication_admin_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "replication_admin_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java index 8f3aefba1808..434d8cf6709b 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java @@ -32,11 +32,9 @@ public class TestShell extends AbstractTestShell { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.exclude", "replication_admin_test.rb,rsgroup_shell_test.rb," + - "admin_test.rb,table_test.rb,quotas_test.rb,admin2_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getExcludeList() { + return "replication_admin_test.rb,rsgroup_shell_test.rb,admin_test.rb,table_test.rb," + + "quotas_test.rb,admin2_test.rb"; } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java index 3172e973b76c..1bea652923ae 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java @@ -30,8 +30,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @Category({ ClientTests.class, MediumTests.class }) public class TestShellNoCluster extends AbstractTestShell { + private static final Logger LOG = LoggerFactory.getLogger(TestShellNoCluster.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -41,7 +45,6 @@ public class TestShellNoCluster extends AbstractTestShell { public static void setUpBeforeClass() throws Exception { // no cluster List loadPaths = new ArrayList<>(2); - loadPaths.add("src/main/ruby"); loadPaths.add("src/test/ruby"); jruby.setLoadPaths(loadPaths); jruby.put("$TEST_CLUSTER", TEST_UTIL); @@ -55,9 +58,11 @@ public static void tearDownAfterClass() throws Exception { // no cluster } + // Keep the same name so we override the with-a-cluster test + @Override @Test - public void testRunNoClusterShellTests() throws IOException { - // Start ruby tests without cluster - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/no_cluster_tests_runner.rb"); + public void testRunShellTests() throws IOException 
{ + LOG.info("Start ruby tests without cluster"); + jruby.runScriptlet(PathType.CLASSPATH, "no_cluster_tests_runner.rb"); } } diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java index e2fdcaa67fcd..26369349e1e5 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestTableShell.java @@ -33,10 +33,8 @@ public class TestTableShell extends AbstractTestShell { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestTableShell.class); - @Test - public void testRunShellTests() throws IOException { - System.setProperty("shell.test.include", "table_test.rb"); - // Start all ruby tests - jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + @Override + protected String getIncludeList() { + return "table_test.rb"; } } diff --git a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java index 9c28cbf5b75b..380ad6163228 100644 --- a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java +++ b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java @@ -52,11 +52,9 @@ public class TestShellRSGroups { final Logger LOG = LoggerFactory.getLogger(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final static ScriptingContainer jruby = new ScriptingContainer(); - private static String basePath; @BeforeClass public static void setUpBeforeClass() throws Exception { - basePath = System.getProperty("basedir"); // Start mini cluster TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); @@ -80,8 +78,7 @@ public static void setUpBeforeClass() throws Exception { // Configure jruby runtime List loadPaths = new ArrayList<>(2); - loadPaths.add(basePath+"/src/main/ruby"); - loadPaths.add(basePath+"/src/test/ruby"); + loadPaths.add("src/test/ruby"); jruby.setLoadPaths(loadPaths); jruby.put("$TEST_CLUSTER", TEST_UTIL); System.setProperty("jruby.jit.logging.verbose", "true"); @@ -99,8 +96,7 @@ public void testRunShellTests() throws IOException { try { // Start only GroupShellTest System.setProperty("shell.test", "Hbase::RSGroupShellTest"); - jruby.runScriptlet(PathType.ABSOLUTE, - basePath + "/src/test/ruby/tests_runner.rb"); + jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); } finally { System.clearProperty("shell.test"); } diff --git a/hbase-shell/src/test/ruby/hbase/admin2_test.rb b/hbase-shell/src/test/ruby/hbase/admin2_test.rb index 9d3834ee667f..8d368188ae18 100644 --- a/hbase-shell/src/test/ruby/hbase/admin2_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin2_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index 65e3e0a6a2d6..fac52ede51b7 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -17,7 +17,7 @@ # limitations under the License.
# -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/list_regions_test_no_cluster.rb b/hbase-shell/src/test/ruby/hbase/list_regions_test_no_cluster.rb index 6be259779d95..75a3c0ef252f 100644 --- a/hbase-shell/src/test/ruby/hbase/list_regions_test_no_cluster.rb +++ b/hbase-shell/src/test/ruby/hbase/list_regions_test_no_cluster.rb @@ -15,7 +15,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'hbase_constants' java_import 'org.apache.hadoop.hbase.HRegionLocation' diff --git a/hbase-shell/src/test/ruby/hbase/quotas_test.rb b/hbase-shell/src/test/ruby/hbase/quotas_test.rb index c4fca28bdfdc..6e506c52f14a 100644 --- a/hbase-shell/src/test/ruby/hbase/quotas_test.rb +++ b/hbase-shell/src/test/ruby/hbase/quotas_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/quotas_test_no_cluster.rb b/hbase-shell/src/test/ruby/hbase/quotas_test_no_cluster.rb index 79f735021a8a..471a81013dd8 100644 --- a/hbase-shell/src/test/ruby/hbase/quotas_test_no_cluster.rb +++ b/hbase-shell/src/test/ruby/hbase/quotas_test_no_cluster.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb index 72fbe943040f..c6ed817ad4ea 100644 --- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' diff --git a/hbase-shell/src/test/ruby/hbase/security_admin_test.rb b/hbase-shell/src/test/ruby/hbase/security_admin_test.rb index 6e9a50cafcd1..8839c33dabce 100644 --- a/hbase-shell/src/test/ruby/hbase/security_admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/security_admin_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' diff --git a/hbase-shell/src/test/ruby/hbase/test_connection_no_cluster.rb b/hbase-shell/src/test/ruby/hbase/test_connection_no_cluster.rb index 361937634c3c..6969a3613e44 100644 --- a/hbase-shell/src/test/ruby/hbase/test_connection_no_cluster.rb +++ b/hbase-shell/src/test/ruby/hbase/test_connection_no_cluster.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'shell' +require 'hbase_shell' require 'stringio' require 'hbase_constants' require 'hbase/hbase' diff --git a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb index e69710d69a53..b59b9b9065fa 100644 --- a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb @@ -17,7 +17,7 @@ # limitations under the License. 
# -require 'shell' +require 'hbase_shell' require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' diff --git a/hbase-shell/src/test/ruby/shell/commands_test.rb b/hbase-shell/src/test/ruby/shell/commands_test.rb index 0fc3dab4110f..c97931ff20f8 100644 --- a/hbase-shell/src/test/ruby/shell/commands_test.rb +++ b/hbase-shell/src/test/ruby/shell/commands_test.rb @@ -19,7 +19,7 @@ require 'hbase_constants' require 'hbase/table' -require 'shell' +require 'hbase_shell' ## # Tests whether all registered commands have a help and command method diff --git a/hbase-shell/src/test/ruby/shell/converter_test.rb b/hbase-shell/src/test/ruby/shell/converter_test.rb index 51e674093f25..34999ea00715 100644 --- a/hbase-shell/src/test/ruby/shell/converter_test.rb +++ b/hbase-shell/src/test/ruby/shell/converter_test.rb @@ -15,7 +15,7 @@ # limitations under the License. require 'hbase_constants' -require 'shell' +require 'hbase_shell' module Hbase class ConverterTest < Test::Unit::TestCase @@ -153,4 +153,4 @@ def teardown assert(!output.include?(hex_column)) end end -end \ No newline at end of file +end diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb b/hbase-shell/src/test/ruby/shell/list_locks_test.rb index 6d291a5000fd..20a910c485dd 100644 --- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb +++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb @@ -18,7 +18,7 @@ # require 'hbase_constants' -require 'shell' +require 'hbase_shell' module Hbase class ListLocksTest < Test::Unit::TestCase diff --git a/hbase-shell/src/test/ruby/shell/list_procedures_test.rb b/hbase-shell/src/test/ruby/shell/list_procedures_test.rb index 2bf5824c0ee3..a9a38fe9e734 100644 --- a/hbase-shell/src/test/ruby/shell/list_procedures_test.rb +++ b/hbase-shell/src/test/ruby/shell/list_procedures_test.rb @@ -18,7 +18,7 @@ # require 'hbase_constants' -require 'shell' +require 'hbase_shell' module Hbase class ListProceduresTest < Test::Unit::TestCase diff --git a/hbase-shell/src/test/ruby/shell/noninteractive_test.rb b/hbase-shell/src/test/ruby/shell/noninteractive_test.rb index 0fae4cbb79a7..fa8dd333be0d 100644 --- a/hbase-shell/src/test/ruby/shell/noninteractive_test.rb +++ b/hbase-shell/src/test/ruby/shell/noninteractive_test.rb @@ -15,7 +15,7 @@ # limitations under the License. 
# require 'hbase_constants' -require 'shell' +require 'hbase_shell' class NonInteractiveTest < Test::Unit::TestCase def setup diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb index 33a6c498bec7..e8ba851b20b8 100644 --- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb +++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb @@ -18,7 +18,7 @@ # require 'hbase_constants' -require 'shell' +require 'hbase_shell' module Hbase class RSGroupShellTest < Test::Unit::TestCase diff --git a/hbase-shell/src/test/ruby/shell/shell_test.rb b/hbase-shell/src/test/ruby/shell/shell_test.rb index 9be6bfba1414..b16aef385408 100644 --- a/hbase-shell/src/test/ruby/shell/shell_test.rb +++ b/hbase-shell/src/test/ruby/shell/shell_test.rb @@ -18,7 +18,7 @@ # require 'hbase_constants' -require 'shell' +require 'hbase_shell' class ShellTest < Test::Unit::TestCase include Hbase::TestHelpers diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb index 26b142638f04..db014f502787 100644 --- a/hbase-shell/src/test/ruby/test_helper.rb +++ b/hbase-shell/src/test/ruby/test_helper.rb @@ -39,7 +39,7 @@ module Hbase module TestHelpers require 'hbase_constants' require 'hbase/hbase' - require 'shell' + require 'hbase_shell' def setup_hbase hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) @@ -169,7 +169,3 @@ def capture_stdout # Extend standard unit tests with our helpers Test::Unit::TestCase.extend(Testing::Declarative) - -# Add the $HBASE_HOME/lib/ruby directory to the ruby -# load path so I can load up my HBase ruby modules -$LOAD_PATH.unshift File.join(File.dirname(__FILE__), "..", "..", "main", "ruby") diff --git a/hbase-shell/src/test/ruby/tests_runner.rb b/hbase-shell/src/test/ruby/tests_runner.rb index b0a0aaf17636..147d68103f5e 100644 --- a/hbase-shell/src/test/ruby/tests_runner.rb +++ b/hbase-shell/src/test/ruby/tests_runner.rb @@ -21,6 +21,8 @@ require 'rake' require 'set' +puts "Ruby description: #{RUBY_DESCRIPTION}" + unless defined?($TEST_CLUSTER) include Java @@ -68,6 +70,7 @@ next end begin + puts "loading test file '#{filename}'." 
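+      # logged before the load so that a crash while loading can be traced back to this file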
load(file) rescue => e puts "ERROR: #{e}" From d890451953cd5a5a3dbeb707ff7a8359ae99b6f9 Mon Sep 17 00:00:00 2001 From: XinSun Date: Thu, 15 Oct 2020 01:08:54 +0800 Subject: [PATCH 448/769] HBASE-25117 ReplicationSourceShipper thread can not be finished (#2521) Signed-off-by: Wellington Chevreuil Signed-off-by: stack Signed-off-by: Guanghao Zhang Signed-off-by: Duo Zhang --- .../regionserver/HBaseInterClusterReplicationEndpoint.java | 5 +++-- .../hbase/replication/regionserver/ReplicationSource.java | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index b127b467505d..56bccc09cdc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -193,7 +193,7 @@ private void connectToPeers() { * @param sleepMultiplier by how many times the default sleeping time is augmented * @return True if sleepMultiplier is < maxRetriesMultiplier */ - protected boolean sleepForRetries(String msg, int sleepMultiplier) { + private boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { LOG.trace("{} {}, sleeping {} times {}", @@ -201,8 +201,9 @@ protected boolean sleepForRetries(String msg, int sleepMultiplier) { } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { + Thread.currentThread().interrupt(); if (LOG.isDebugEnabled()) { - LOG.debug("{} Interrupted while sleeping between retries", logPeerId()); + LOG.debug("{} {} Interrupted while sleeping between retries", msg, logPeerId()); } } return sleepMultiplier < maxRetriesMultiplier; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index b68e0587d7de..8091d0ce71f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -691,6 +691,9 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, } } + if (this.replicationEndpoint != null) { + this.replicationEndpoint.stop(); + } for (ReplicationSourceShipper worker : workers) { if (worker.isAlive() || worker.entryReader.isAlive()) { try { @@ -711,9 +714,6 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, } } - if (this.replicationEndpoint != null) { - this.replicationEndpoint.stop(); - } if (join) { for (ReplicationSourceShipper worker : workers) { Threads.shutdown(worker, this.sleepForRetries); From 815ffdcaac9ba1ed63e961cbc500a75ac26ac0f1 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 15 Oct 2020 10:22:14 +0800 Subject: [PATCH 449/769] HBASE-25186 TestMasterRegionOnTwoFileSystems is failing after HBASE-25065 (#2544) Signed-off-by: Ramkrishna --- .../hbase/master/region/MasterRegionWALRoller.java | 6 ++++-- .../apache/hadoop/hbase/wal/AbstractWALRoller.java | 12 +++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java index ef3dd121133b..bba6611c68d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionWALRoller.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.AbstractWALRoller; -import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -68,8 +67,11 @@ private MasterRegionWALRoller(String name, Configuration conf, Abortable abortab } @Override - protected void afterRoll(WAL wal) { + protected void afterWALArchive(Path oldPath, Path newPath) { // move the archived WAL files to the global archive path + // here we do not use the newPath directly, so that even if we fail to move some of the + // newPaths, we are still safe because every time we will get all the files under the archive + // directory. try { MasterRegionUtils.moveFilesUnderDir(fs, walArchiveDir, globalWALArchiveDir, archivedWALSuffix); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index a5a0ee3a3225..4d89c4753844 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; @@ -86,6 +87,11 @@ public void logRollRequested(WALActionsListener.RollRequestReason reason) { AbstractWALRoller.this.notifyAll(); } } + + @Override + public void postLogArchive(Path oldPath, Path newPath) throws IOException { + afterWALArchive(oldPath, newPath); + } }); } } @@ -190,7 +196,6 @@ public void run() { LOG.warn("WAL has been closed. Skipping rolling of writer and just remove it", e); iter.remove(); } - afterRoll(wal); } } catch (FailedLogCloseException | ConnectException e) { abort("Failed log close in log roller", e); @@ -206,10 +211,7 @@ public void run() { LOG.info("LogRoller exiting."); } - /** - * Called after we finish rolling the give {@code wal}. 
- */ - protected void afterRoll(WAL wal) { + protected void afterWALArchive(Path oldPath, Path newPath) { } /** From ef6eed6e6e4d97dba57b9e7cd627c5515a8491f4 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 15 Oct 2020 07:02:53 -0700 Subject: [PATCH 450/769] HBASE-24628 Region normalizer now respects a rate limit (HMaster chore shutdown NPE ADDENDUM) (#2540) Signed-off-by: Michael Stack Signed-off-by: Duo Zhang --- .../main/java/org/apache/hadoop/hbase/master/HMaster.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 85ac5e0b0490..f58096fa5407 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1123,7 +1123,9 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc getChoreService().scheduleChore(clusterStatusChore); this.balancerChore = new BalancerChore(this); getChoreService().scheduleChore(balancerChore); - getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore()); + if (regionNormalizerManager != null) { + getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore()); + } this.catalogJanitorChore = new CatalogJanitor(this); getChoreService().scheduleChore(catalogJanitorChore); this.hbckChore = new HbckChore(this); @@ -1638,7 +1640,9 @@ private void stopChores() { choreService.cancelChore(this.mobFileCleanerChore); choreService.cancelChore(this.mobFileCompactionChore); choreService.cancelChore(this.balancerChore); - choreService.cancelChore(getRegionNormalizerManager().getRegionNormalizerChore()); + if (regionNormalizerManager != null) { + choreService.cancelChore(regionNormalizerManager.getRegionNormalizerChore()); + } choreService.cancelChore(this.clusterStatusChore); choreService.cancelChore(this.catalogJanitorChore); choreService.cancelChore(this.clusterStatusPublisherChore); From 68eeed950a815d084a8d50ccfad7c89b413e1d7b Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Fri, 16 Oct 2020 12:04:10 +0530 Subject: [PATCH 451/769] HBASE-25179 : Fix Assert format in HFilePerformanceEvaluation class Closes #2551 Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/HFilePerformanceEvaluation.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index 2c4209ce8db8..cbfadb5d22e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -447,7 +447,7 @@ void doRow(int i) throws Exception { // TODO: Fix. Make Scanner do Cells. 
Cell c = this.scanner.getCell(); PerformanceEvaluationCommons.assertKey(format(i + 1), c); - PerformanceEvaluationCommons.assertValueSize(c.getValueLength(), ROW_LENGTH); + PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } } @@ -478,7 +478,7 @@ void doRow(int i) throws Exception { // TODO: Fix scanner so it does Cells Cell c = scanner.getCell(); PerformanceEvaluationCommons.assertKey(b, c); - PerformanceEvaluationCommons.assertValueSize(c.getValueLength(), ROW_LENGTH); + PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } private byte [] getRandomRow() { @@ -515,7 +515,7 @@ void doRow(int i) throws Exception { return; } c = scanner.getCell(); - PerformanceEvaluationCommons.assertValueSize(c.getValueLength(), ROW_LENGTH); + PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } } From 2b7e23abd948358d6f5baa414260d5654d6e4b07 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 20 Oct 2020 11:51:34 +0800 Subject: [PATCH 452/769] HBASE-25194 Do not publish workspace in flaky find job (#2564) Signed-off-by: Sean Busbey --- .../flaky-tests/flaky-reporting.Jenkinsfile | 7 ++++--- dev-support/flaky-tests/report-flakies.py | 17 ++++++++++++----- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile index 640b1cb54a77..25e3fdeef841 100644 --- a/dev-support/flaky-tests/flaky-reporting.Jenkinsfile +++ b/dev-support/flaky-tests/flaky-reporting.Jenkinsfile @@ -43,7 +43,8 @@ pipeline { flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase%20Nightly/job/${BRANCH_NAME}" --is-yetus True --max-builds 10) flaky_args=("${flaky_args[@]}" --urls "${JENKINS_URL}/job/HBase/job/HBase-Flaky-Tests/job/${BRANCH_NAME}" --is-yetus False --max-builds 30) docker build -t hbase-dev-support dev-support - docker run --ulimit nproc=12500 -v "${WORKSPACE}":/hbase --workdir=/hbase hbase-dev-support python dev-support/flaky-tests/report-flakies.py --mvn -v "${flaky_args[@]}" + docker run --ulimit nproc=12500 -v "${WORKSPACE}":/hbase -u `id -u`:`id -g` --workdir=/hbase hbase-dev-support \ + python dev-support/flaky-tests/report-flakies.py --mvn -v -o output "${flaky_args[@]}" ''' } } @@ -51,13 +52,13 @@ pipeline { post { always { // Has to be relative to WORKSPACE. - archiveArtifacts artifacts: "includes,excludes,dashboard.html" + archiveArtifacts artifacts: "output/*" publishHTML target: [ allowMissing: true, keepAll: true, alwaysLinkToLastBuild: true, // Has to be relative to WORKSPACE - reportDir: ".", + reportDir: "output", reportFiles: 'dashboard.html', reportName: 'Flaky Test Report' ] diff --git a/dev-support/flaky-tests/report-flakies.py b/dev-support/flaky-tests/report-flakies.py index 1b3161af6d83..d29ecfa4da6e 100755 --- a/dev-support/flaky-tests/report-flakies.py +++ b/dev-support/flaky-tests/report-flakies.py @@ -60,6 +60,8 @@ "strings are written to files so they can be saved as artifacts and easily imported in " "other projects. Also writes timeout and failing tests in separate files for " "reference.") +parser.add_argument("-o", "--output", metavar='dir', action='store', required=False, + help="the output directory") parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true") args = parser.parse_args() @@ -68,6 +70,11 @@ if args.verbose: logger.setLevel(logging.INFO) +output_dir = '.' 
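+# default is the current working directory; CI passes -o/--output so the dashboard and
+# include/exclude lists land in a directory it can archive (see the Jenkinsfile change above)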
+if args.output is not None: + output_dir = args.output + if not os.path.exists(output_dir): + os.makedirs(output_dir) def get_bad_tests(build_url, is_yetus): """ @@ -257,24 +264,24 @@ def expand_multi_config_projects(cli_args): all_bad_tests = all_hanging_tests.union(all_failed_tests) if args.mvn: includes = ",".join(all_bad_tests) - with open("./includes", "w") as inc_file: + with open(output_dir + "/includes", "w") as inc_file: inc_file.write(includes) excludes = ["**/{0}.java".format(bad_test) for bad_test in all_bad_tests] - with open("./excludes", "w") as exc_file: + with open(output_dir + "/excludes", "w") as exc_file: exc_file.write(",".join(excludes)) - with open("./timeout", "w") as timeout_file: + with open(output_dir + "/timeout", "w") as timeout_file: timeout_file.write(",".join(all_timeout_tests)) - with open("./failed", "w") as failed_file: + with open(output_dir + "/failed", "w") as failed_file: failed_file.write(",".join(all_failed_tests)) dev_support_dir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f: template = Template(f.read()) -with open("dashboard.html", "w") as f: +with open(output_dir + "/dashboard.html", "w") as f: datetime = time.strftime("%m/%d/%Y %H:%M:%S") f.write(template.render(datetime=datetime, bad_tests_count=len(all_bad_tests), results=url_to_bad_test_results, build_ids=url_to_build_ids)) From ab4efccef473d4536616aca0021b69e01959170f Mon Sep 17 00:00:00 2001 From: Ankit Singhal Date: Mon, 19 Oct 2020 22:22:33 -0700 Subject: [PATCH 453/769] HBASE-25166 MobFileCompactionChore is closing the master's shared cluster connection (#2514) --- .../org/apache/hadoop/hbase/mob/MobFileCompactionChore.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java index 314729833959..dd5d2898ea01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCompactionChore.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.CompactionState; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; @@ -85,9 +84,7 @@ protected void chore() { boolean reported = false; - try (Connection conn = master.getConnection(); - Admin admin = conn.getAdmin();) { - + try (Admin admin = master.getConnection().getAdmin()) { TableDescriptors htds = master.getTableDescriptors(); Map map = htds.getAll(); for (TableDescriptor htd : map.values()) { From 1322724da84cca02dcd9f847785770c60c801d61 Mon Sep 17 00:00:00 2001 From: Guanghao Zhang Date: Tue, 20 Oct 2020 16:58:43 +0800 Subject: [PATCH 454/769] HBASE-25204 Nightly job failed as the name of jdk and maven changed (#2567) Signed-off-by: Duo Zhang --- dev-support/Jenkinsfile | 4 ++-- dev-support/adhoc_run_tests/Jenkinsfile | 4 ++-- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 01d50197fbc5..94535b5b443b 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -593,9 +593,9 @@ 
pipeline { // TODO (HBASE-23870): replace this with invocation of the release tool stage ('packaging and integration') { tools { - maven 'Maven (latest)' + maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in. - jdk "JDK 1.8 (latest)" + jdk "jdk_1.8_latest" } environment { BASEDIR = "${env.WORKSPACE}/component" diff --git a/dev-support/adhoc_run_tests/Jenkinsfile b/dev-support/adhoc_run_tests/Jenkinsfile index e06fdba325a6..476795d50ca8 100644 --- a/dev-support/adhoc_run_tests/Jenkinsfile +++ b/dev-support/adhoc_run_tests/Jenkinsfile @@ -51,10 +51,10 @@ pipeline { stages { stage ('run tests') { tools { - maven 'Maven (latest)' + maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch // the Jenkinsfile is stored in. - jdk "JDK 1.8 (latest)" + jdk "jdk_1.8_latest" } steps { sh """#!/bin/bash -e diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index 959ae31a0767..a6996bf8bf07 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -34,8 +34,8 @@ pipeline { } tools { // this should match what the yetus nightly job for the branch will use - maven 'Maven (latest)' - jdk "JDK 1.8 (latest)" + maven 'maven_latest' + jdk "jdk_1.8_latest" } stages { stage ('run flaky tests') { From 0ddda38e428501cb40e0b97b426d5fcf2e029b1d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 20 Oct 2020 17:00:06 +0800 Subject: [PATCH 455/769] HBASE-25203 Change the reference url to flaky list in our jenkins jobs (#2566) Signed-off-by: Guanghao Zhang --- dev-support/Jenkinsfile | 2 +- dev-support/Jenkinsfile_GitHub | 2 +- dev-support/flaky-tests/run-flaky-tests.Jenkinsfile | 2 +- dev-support/hbase-personality.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 94535b5b443b..9f23a58873cd 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -49,7 +49,7 @@ pipeline { ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' - EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes" + EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/excludes" // TODO does hadoopcheck need to be jdk specific? SHALLOW_CHECKS = 'all,-shadedjars,-unit' // run by the 'yetus general check' DEEP_CHECKS = 'compile,htmlout,javac,maven,mvninstall,shadedjars,unit' // run by 'yetus jdkX (HadoopY) checks' diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index d314ba45cd9c..a15ee9e84957 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -44,7 +44,7 @@ pipeline { ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. 
TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' - EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${CHANGE_TARGET}/lastSuccessfulBuild/artifact/excludes" + EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${CHANGE_TARGET}/lastSuccessfulBuild/artifact/output/excludes" // a global view of paths. parallel stages can land on the same host concurrently, so each // stage works in its own subdirectory. there is an "output" under each of these diff --git a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile index a6996bf8bf07..0ba200ba07f2 100644 --- a/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile +++ b/dev-support/flaky-tests/run-flaky-tests.Jenkinsfile @@ -49,7 +49,7 @@ pipeline { mvn_args=("${mvn_args[@]}" -X) set -x fi - curl "${curl_args[@]}" -o includes.txt "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/includes" + curl "${curl_args[@]}" -o includes.txt "${JENKINS_URL}/job/HBase/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/output/includes" if [ -s includes.txt ]; then rm -rf local-repository/org/apache/hbase mvn clean "${mvn_args[@]}" diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 9b39ead6bc07..2eee06e1e9ba 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -318,7 +318,7 @@ function get_include_exclude_tests_arg fi else # Use branch specific exclude list when EXCLUDE_TESTS_URL and INCLUDE_TESTS_URL are empty - FLAKY_URL="https://ci-hadoop.apache.org/job/HBase/job/HBase-Find-Flaky-Tests/job/${PATCH_BRANCH}/lastSuccessfulBuild/artifact/excludes/" + FLAKY_URL="https://ci-hadoop.apache.org/job/HBase/job/HBase-Find-Flaky-Tests/job/${PATCH_BRANCH}/lastSuccessfulBuild/artifact/output/excludes" if wget "${FLAKY_URL}" -O "excludes"; then excludes=$(cat excludes) yetus_debug "excludes=${excludes}" From 99434b15fec70ab7d6acbd3b82d9071bb3c29360 Mon Sep 17 00:00:00 2001 From: Jan Hentschel Date: Wed, 21 Oct 2020 09:18:35 +0200 Subject: [PATCH 456/769] HBASE-25196 Add deprecation documentation to HConstants (#2559) Add the documentation when HConstants#REPLICATION_DROP_ON_DELETED_TABLE_KEY was deprecated and when it is expected to be removed. Signed-off-by: Duo Zhang --- .../src/main/java/org/apache/hadoop/hbase/HConstants.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 41bf487de055..e1d3de9d513b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1358,7 +1358,9 @@ public enum OperationStatusCode { /** * Drop edits for tables that been deleted from the replication source and target - * @deprecated moved it into HBaseInterClusterReplicationEndpoint + * @deprecated since 3.0.0. Will be removed in 4.0.0. + * Moved it into HBaseInterClusterReplicationEndpoint. 
+ * @see HBASE-24359 */ @Deprecated public static final String REPLICATION_DROP_ON_DELETED_TABLE_KEY = From 81f0a33c4feab613a755bc9da6dafd7d7772cb7f Mon Sep 17 00:00:00 2001 From: Jan Hentschel Date: Wed, 21 Oct 2020 15:40:45 +0200 Subject: [PATCH 457/769] HBASE-25198 Remove deprecated RpcSchedulerFactory#create (#2561) Remove the deprecated RpcSchedulerFactory#create(Configuration, PriorityFunction) method from the interface and in all implementing classes. Signed-off-by: Duo Zhang --- .../hbase/regionserver/FifoRpcSchedulerFactory.java | 6 ------ .../hadoop/hbase/regionserver/RpcSchedulerFactory.java | 7 ------- .../hbase/regionserver/SimpleRpcSchedulerFactory.java | 10 ---------- 3 files changed, 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java index c77de648f4e9..12896a2d54ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java @@ -38,10 +38,4 @@ public RpcScheduler create(Configuration conf, PriorityFunction priority, Aborta HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); return new FifoRpcScheduler(conf, handlerCount); } - - @Deprecated - @Override - public RpcScheduler create(Configuration conf, PriorityFunction priority) { - return create(conf, priority, null); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java index dbd393db9884..d1d1cfc52942 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java @@ -35,11 +35,4 @@ public interface RpcSchedulerFactory { * Constructs a {@link org.apache.hadoop.hbase.ipc.RpcScheduler}. */ RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server); - - /** - * @deprecated since 1.0.0. - * @see HBASE-12028 - */ - @Deprecated - RpcScheduler create(Configuration conf, PriorityFunction priority); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java index 22a9da548d6f..06b004321c55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java @@ -32,16 +32,6 @@ @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) @InterfaceStability.Evolving public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory { - /** - * @deprecated since 1.0.0. 
- * @see HBASE-12028 - */ - @Override - @Deprecated - public RpcScheduler create(Configuration conf, PriorityFunction priority) { - return create(conf, priority, null); - } - @Override public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) { int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, From 1979f0255d3aff284480c79659d532bea522ac6e Mon Sep 17 00:00:00 2001 From: Jan Hentschel Date: Wed, 21 Oct 2020 19:04:17 +0200 Subject: [PATCH 458/769] HBASE-25197 Remove SingletonCoprocessorService interface (#2560) Remove the SingletonCoprocessorService interface targeted for removal in 3.0.0. Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../SingletonCoprocessorService.java | 37 ------------------- 1 file changed, 37 deletions(-) delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java deleted file mode 100644 index 2b2aedee22c9..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.coprocessor; - -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; - -import org.apache.hbase.thirdparty.com.google.protobuf.Service; - -/** - * Coprocessor endpoints registered once per server and providing protobuf services should implement - * this interface and return the {@link Service} instance via {@link #getService()}. - * @deprecated Since 2.0. Will be removed in 3.0 - */ -@Deprecated -@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -@InterfaceStability.Evolving -public interface SingletonCoprocessorService { - Service getService(); -} From 6124add7c9e4f2fe4cf38b7586ee0563a0a79cfd Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 22 Oct 2020 21:50:43 +0800 Subject: [PATCH 459/769] HBASE-25207 Revisit the implementation and usage of RegionStates.include (#2571) Remove the RegionStates.include method as its name is ambiguous. Add more comments to describe the logic of why we filter regions like this.
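For illustration only (editor's sketch, not part of the original message): callers that
used to pass an ambiguous boolean now pick an intention-revealing method, roughly:

    // before: does 'true' include offline regions? split parents?
    List<RegionInfo> regions = regionStates.getRegionsOfTable(tableName, true);

    // after: the filtering rule is named at the call site
    List<RegionInfo> online = regionStates.getRegionsOfTable(tableName);
    List<RegionInfo> forEnabling = regionStates.getRegionsOfTableForEnabling(tableName);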
Signed-off-by: Toshihiro Suzuki --- .../apache/hadoop/hbase/master/HMaster.java | 2 +- .../master/assignment/AssignmentManager.java | 17 +++++++++-- .../hbase/master/assignment/RegionStates.java | 28 ++++++++++++------- .../procedure/EnableTableProcedure.java | 4 +-- .../master/assignment/TestRegionStates.java | 23 ++++++--------- 5 files changed, 45 insertions(+), 29 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index f58096fa5407..575feae4c75f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -3824,7 +3824,7 @@ public CompactionState getCompactionState(final TableName tableName) { CompactionState compactionState = CompactionState.NONE; try { List regions = - assignmentManager.getRegionStates().getRegionsOfTable(tableName, false); + assignmentManager.getRegionStates().getRegionsOfTable(tableName); for (RegionInfo regionInfo : regions) { ServerName serverName = assignmentManager.getRegionStates().getRegionServerOfRegion(regionInfo); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 9a88533f3565..49f1eb1fb567 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -883,8 +883,13 @@ private TransitRegionStateProcedure[] createAssignProcedures( private TransitRegionStateProcedure forceCreateUnssignProcedure(RegionStateNode regionNode) { regionNode.lock(); try { - if (!regionStates.include(regionNode, false) || - regionStates.isRegionOffline(regionNode.getRegionInfo())) { + if (regionNode.isInState(State.OFFLINE, State.CLOSED, State.SPLIT)) { + return null; + } + // in general, a split parent should be in CLOSED or SPLIT state, but anyway, let's check it + // here for safety + if (regionNode.getRegionInfo().isSplit()) { + LOG.warn("{} is a split parent but not in CLOSED or SPLIT state", regionNode); return null; } // As in DisableTableProcedure or ModifyTableProcedure, we will hold the xlock for table, so @@ -1922,6 +1927,14 @@ public void markRegionAsSplit(final RegionInfo parent, final ServerName serverNa nodeB.setState(State.SPLITTING_NEW); TableDescriptor td = master.getTableDescriptors().get(parent.getTable()); + // TODO: here we just update the parent region info in meta, to set split and offline to true, + // without changing the one in the region node. This is a bit confusing but the region info + // field in RegionStateNode is not expected to be changed in the current design. Need to find a + // possible way to address this problem, or at least add more comments about the trick used to + // deal with it: when you want to filter out a split parent, you need to check both + // the RegionState on whether it is split, and also the region info. If one of them matches then + // it is a split parent. And usually only one of them can match, as after restart, the region + // state will be changed from SPLIT to CLOSED.
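+    // the splitRegion call below is what performs that meta update: it rewrites the parent
+    // (now marked split and offline) and inserts the two daughter regions.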
regionStateStore.splitRegion(parent, daughterA, daughterB, serverName, td); if (shouldAssignFavoredNodes(parent)) { List onlineServers = this.master.getServerManager().getOnlineServersList(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java index 3bb3c4c0b358..061b8175886a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java @@ -173,7 +173,7 @@ public void deleteRegions(final List regionInfos) { regionInfos.forEach(this::deleteRegion); } - ArrayList getTableRegionStateNodes(final TableName tableName) { + List getTableRegionStateNodes(final TableName tableName) { final ArrayList regions = new ArrayList(); for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) { if (!node.getTable().equals(tableName)) break; @@ -241,8 +241,10 @@ public boolean hasTableRegionStates(final TableName tableName) { /** * @return Return online regions of table; does not include OFFLINE or SPLITTING regions. */ - public List getRegionsOfTable(final TableName table) { - return getRegionsOfTable(table, false); + public List getRegionsOfTable(TableName table) { + return getRegionsOfTable(table, + regionNode -> !regionNode.isInState(State.OFFLINE, State.SPLIT) && + !regionNode.getRegionInfo().isSplitParent()); } private HRegionLocation createRegionForReopen(RegionStateNode node) { @@ -346,16 +348,22 @@ public HRegionLocation checkReopened(HRegionLocation oldLoc) { } /** - * @return Return online regions of table; does not include OFFLINE or SPLITTING regions. + * Get the regions for enabling a table. + *
<p/>
    + * Here we want the EnableTableProcedure to be more robust and can be used to fix some nasty + * states, so the checks in this method will be a bit strange. In general, a region can only be + * offline when it is split, for merging we will just delete the parent regions, but with HBCK we + * may force update the state of a region to fix some nasty bugs, so in this method we will try to + * bring the offline regions back if it is not split. That's why we only check for split state + * here. */ - public List getRegionsOfTable(TableName table, boolean offline) { - return getRegionsOfTable(table, state -> include(state, offline)); + public List getRegionsOfTableForEnabling(TableName table) { + return getRegionsOfTable(table, + regionNode -> !regionNode.isInState(State.SPLIT) && !regionNode.getRegionInfo().isSplit()); } /** - * @return Return the regions of the table; does not include OFFLINE unless you set - * offline to true. Does not include regions that are in the - * {@link State#SPLIT} state. + * @return Return the regions of the table and filter them. */ private List getRegionsOfTable(TableName table, Predicate filter) { return getTableRegionStateNodes(table).stream().filter(filter).map(n -> n.getRegionInfo()) @@ -368,7 +376,7 @@ private List getRegionsOfTable(TableName table, Predicatenode (do not include * if split or offline unless offline is set to true. */ - boolean include(final RegionStateNode node, final boolean offline) { + private boolean include(final RegionStateNode node, final boolean offline) { if (LOG.isTraceEnabled()) { LOG.trace("WORKING ON " + node + " " + node.getRegionInfo()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index 1e48981e417c..8b295ec72fc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -97,9 +97,9 @@ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableS TableDescriptor tableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); int configuredReplicaCount = tableDescriptor.getRegionReplication(); - // Get regions for the table from memory; get both online and offline regions ('true'). + // Get regions for the table from memory List regionsOfTable = - env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true); + env.getAssignmentManager().getRegionStates().getRegionsOfTableForEnabling(tableName); // How many replicas do we currently have? Check regions returned from // in-memory state. 
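The two filters introduced above, side by side (a sketch assembled from the hunks in this patch, not code that appears verbatim in it):

    // "Online" regions: skip OFFLINE and SPLIT states as well as split parents.
    Predicate<RegionStateNode> online =
        node -> !node.isInState(State.OFFLINE, State.SPLIT)
            && !node.getRegionInfo().isSplitParent();

    // For enabling: skip only regions that are really split, so regions left
    // OFFLINE (e.g. by an HBCK force-update) can be brought back online.
    Predicate<RegionStateNode> forEnabling =
        node -> !node.isInState(State.SPLIT) && !node.getRegionInfo().isSplit();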
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java index 48cca305700f..b24ec1626849 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java @@ -58,7 +58,7 @@ public class TestRegionStates { protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static ThreadPoolExecutor threadPool; - private static ExecutorCompletionService executorService; + private static ExecutorCompletionService executorService; @BeforeClass public static void setUp() throws Exception { @@ -66,7 +66,7 @@ public static void setUp() throws Exception { new ThreadFactoryBuilder().setNameFormat("ProcedureDispatcher-pool-%d").setDaemon(true) .setUncaughtExceptionHandler((t, e) -> LOG.warn("Failed thread " + t.getName(), e)) .build()); - executorService = new ExecutorCompletionService(threadPool); + executorService = new ExecutorCompletionService<>(threadPool); } @AfterClass @@ -129,13 +129,13 @@ public void testRegionDoubleCreation() throws Exception { checkTableRegions(stateMap, TABLE_NAME_C, NSMALL_RUNS); } - private void checkTableRegions(final RegionStates stateMap, - final TableName tableName, final int nregions) { - List hris = stateMap.getRegionsOfTable(tableName, true); - assertEquals(nregions, hris.size()); - for (int i = 1; i < hris.size(); ++i) { - long a = Bytes.toLong(hris.get(i - 1).getStartKey()); - long b = Bytes.toLong(hris.get(i + 0).getStartKey()); + private void checkTableRegions(final RegionStates stateMap, final TableName tableName, + final int nregions) { + List rns = stateMap.getTableRegionStateNodes(tableName); + assertEquals(nregions, rns.size()); + for (int i = 1; i < rns.size(); ++i) { + long a = Bytes.toLong(rns.get(i - 1).getRegionInfo().getStartKey()); + long b = Bytes.toLong(rns.get(i + 0).getRegionInfo().getStartKey()); assertEquals(b, a + 1); } } @@ -155,11 +155,6 @@ public Object call() { }); } - private Object createRegionNode(final RegionStates stateMap, - final TableName tableName, final long regionId) { - return stateMap.getOrCreateRegionStateNode(createRegionInfo(tableName, regionId)); - } - private RegionInfo createRegionInfo(final TableName tableName, final long regionId) { return RegionInfoBuilder.newBuilder(tableName) .setStartKey(Bytes.toBytes(regionId)) From 488e2077e54c3a8ef1596ef7c61d98a3b0162a06 Mon Sep 17 00:00:00 2001 From: sanjeetnishad95 Date: Fri, 23 Oct 2020 06:12:00 +0530 Subject: [PATCH 460/769] HBASE-25128 RSGroupInfo's toString() and hashCode() does not take into account configuration map. 
(#2484) Signed-off-by: Guanghao Zhang --- .../java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java index 1aa7ca1fedd0..bb4a4d7c6228 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java @@ -203,6 +203,9 @@ public String toString() { sb.append(", "); sb.append(" Tables:"); sb.append(this.tables); + sb.append(", "); + sb.append(" Configurations:"); + sb.append(this.configuration); return sb.toString(); } @@ -239,6 +242,7 @@ public int hashCode() { int result = servers.hashCode(); result = 31 * result + tables.hashCode(); result = 31 * result + name.hashCode(); + result = 31 * result + configuration.hashCode(); return result; } } From 83b979103fa6f85aba2d7279fc8a8bb699a65226 Mon Sep 17 00:00:00 2001 From: Sandeep Pal Date: Fri, 23 Oct 2020 12:23:36 +0530 Subject: [PATCH 461/769] HBASE-25193: Add support for row prefix and type in the WAL Pretty Printer Closes #2556 Signed-off-by: Wellington Chevreuil Signed-off-by: Bharath Vissapragada Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../region/WALProcedurePrettyPrinter.java | 2 +- .../hadoop/hbase/wal/WALPrettyPrinter.java | 89 ++++++++++++++----- 2 files changed, 70 insertions(+), 21 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java index a4ed7339845d..0e60709b5e09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/WALProcedurePrettyPrinter.java @@ -105,7 +105,7 @@ protected int doWork() throws Exception { if (!Bytes.equals(PROC_FAMILY, 0, PROC_FAMILY.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) { // We could have cells other than procedure edits, for example, a flush marker - WALPrettyPrinter.printCell(out, op, false); + WALPrettyPrinter.printCell(out, op, false, false); continue; } long procId = Bytes.toLong(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index a37efec610eb..07bcb1067ffc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -46,7 +46,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - +import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; @@ -85,7 +85,12 @@ public class WALPrettyPrinter { // List of tables for filter private final Set tableSet; private String region; + + // exact row which needs to be filtered private String row; + // prefix of rows which needs to be filtered + private String rowPrefix; + private boolean outputOnlyRowKey; // enable 
in order to output a single list of transactions from several files private boolean persistentOutput; @@ -107,6 +112,7 @@ public WALPrettyPrinter() { tableSet = new HashSet<>(); region = null; row = null; + rowPrefix = null; outputOnlyRowKey = false; persistentOutput = false; firstTxn = true; @@ -181,6 +187,17 @@ public void setRowFilter(String row) { this.row = row; } + /** + * sets the rowPrefix key prefix by which output will be filtered + * + * @param rowPrefix + * when not null, serves as a filter; only log entries with rows + * having this prefix will be printed + */ + public void setRowPrefixFilter(String rowPrefix) { + this.rowPrefix = rowPrefix; + } + /** * Option to print the row key only in case you just need the row keys from the WAL */ @@ -301,15 +318,12 @@ public void processFile(final Configuration conf, final Path p) List> actions = new ArrayList<>(); for (Cell cell : edit.getCells()) { // add atomic operation to txn - Map op = new HashMap<>(toStringMap(cell, outputOnlyRowKey)); - if (outputValues) { - op.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell))); - } - // check row output filter - if (row == null || ((String) op.get("row")).equals(row)) { - actions.add(op); + Map op = + new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, row, outputValues)); + if (op.isEmpty()) { + continue; } - op.put("total_size_sum", cell.heapSize()); + actions.add(op); } if (actions.isEmpty()) { continue; @@ -326,15 +340,19 @@ public void processFile(final Configuration conf, final Path p) out.print(GSON.toJson(txn)); } else { // Pretty output, complete with indentation by atomic action - out.println(String.format(outputTmpl, + if (!outputOnlyRowKey) { + out.println(String.format(outputTmpl, txn.get("sequence"), txn.get("table"), txn.get("region"), new Date(writeTime))); + } for (int i = 0; i < actions.size(); i++) { Map op = actions.get(i); - printCell(out, op, outputValues); + printCell(out, op, outputValues, outputOnlyRowKey); } } - out.println("edit heap size: " + entry.getEdit().heapSize()); - out.println("position: " + log.getPosition()); + if (!outputOnlyRowKey) { + out.println("edit heap size: " + entry.getEdit().heapSize()); + out.println("position: " + log.getPosition()); + } } } finally { log.close(); @@ -344,9 +362,17 @@ public void processFile(final Configuration conf, final Path p) } } - public static void printCell(PrintStream out, Map op, boolean outputValues) { - out.println("row=" + op.get("row") + ", type=" + op.get("type") + ", column=" + - op.get("family") + ":" + op.get("qualifier")); + public static void printCell(PrintStream out, Map op, + boolean outputValues, boolean outputOnlyRowKey) { + String rowDetails = "row=" + op.get("row"); + if (outputOnlyRowKey) { + out.println(rowDetails); + return; + } + + rowDetails += ", column=" + op.get("family") + ":" + op.get("qualifier"); + rowDetails += ", type=" + op.get("type"); + out.println(rowDetails); if (op.get("tag") != null) { out.println(" tag: " + op.get("tag")); } @@ -356,11 +382,20 @@ public static void printCell(PrintStream out, Map op, boolean ou out.println("cell total size sum: " + op.get("total_size_sum")); } - public static Map toStringMap(Cell cell, boolean printRowKeyOnly) { + public static Map toStringMap(Cell cell, + boolean printRowKeyOnly, String rowPrefix, String row, boolean outputValues) { Map stringMap = new HashMap<>(); - stringMap.put("row", - Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + String rowKey = 
Bytes.toStringBinary(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength()); + // Row and row prefix are mutually options so both cannot be true at the + // same time. We can include checks in the same condition + // Check if any of the filters are satisfied by the row, if not return empty map + if ((!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) || + (!Strings.isNullOrEmpty(row) && !rowKey.equals(row))) { + return stringMap; + } + stringMap.put("row", rowKey); if (printRowKeyOnly) { return stringMap; } @@ -372,6 +407,7 @@ public static Map toStringMap(Cell cell, boolean printRowKeyOnly cell.getQualifierLength())); stringMap.put("timestamp", cell.getTimestamp()); stringMap.put("vlen", cell.getValueLength()); + stringMap.put("total_size_sum", cell.heapSize()); if (cell.getTagsLength() > 0) { List tagsString = new ArrayList<>(); Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); @@ -382,11 +418,14 @@ public static Map toStringMap(Cell cell, boolean printRowKeyOnly } stringMap.put("tag", tagsString); } + if (outputValues) { + stringMap.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell))); + } return stringMap; } public static Map toStringMap(Cell cell) { - return toStringMap(cell, false); + return toStringMap(cell, false, null, null, false); } public static void main(String[] args) throws IOException { @@ -417,6 +456,7 @@ public static void run(String[] args) throws IOException { options.addOption("k", "outputOnlyRowKey", false, "Print only row keys"); options.addOption("w", "row", true, "Row to filter by. Pass row name."); + options.addOption("f", "rowPrefix", true, "Row prefix to filter by."); options.addOption("g", "goto", true, "Position to seek to in the file"); WALPrettyPrinter printer = new WALPrettyPrinter(); @@ -450,8 +490,17 @@ public static void run(String[] args) throws IOException { printer.setSequenceFilter(Long.parseLong(cmd.getOptionValue("s"))); } if (cmd.hasOption("w")) { + if (cmd.hasOption("f")) { + throw new ParseException("Row and Row-prefix cannot be supplied together"); + } printer.setRowFilter(cmd.getOptionValue("w")); } + if (cmd.hasOption("f")) { + if (cmd.hasOption("w")) { + throw new ParseException("Row and Row-prefix cannot be supplied together"); + } + printer.setRowPrefixFilter(cmd.getOptionValue("f")); + } if (cmd.hasOption("g")) { printer.setPosition(Long.parseLong(cmd.getOptionValue("g"))); } From 0b3d7e540e58cdba3930c670e0e74ab2a3aa4974 Mon Sep 17 00:00:00 2001 From: Toshihiro Suzuki Date: Sat, 24 Oct 2020 23:49:14 +0900 Subject: [PATCH 462/769] HBASE-25206 Data loss can happen if a cloned table loses original split region(delete table) (#2569) Signed-off-by: Duo Zhang --- .../hbase/master/assignment/RegionStates.java | 12 +++++++ .../TransitRegionStateProcedure.java | 1 + .../procedure/DeleteTableProcedure.java | 3 +- ...romClientAfterSplittingRegionTestBase.java | 36 +++++++++++++++++++ 4 files changed, 51 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java index 061b8175886a..06378002ecbd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java @@ -362,6 +362,18 @@ public List getRegionsOfTableForEnabling(TableName table) { regionNode -> !regionNode.isInState(State.SPLIT) && 
!regionNode.getRegionInfo().isSplit()); } + /** + * Get the regions for deleting a table. + * <p/>
    + * Here we need to return all the regions irrespective of the states in order to archive them + * all. This is because if we don't archive OFFLINE/SPLIT regions and if a snapshot or a cloned + * table references the regions, we will lose the data of the regions. + */ + public List<RegionInfo> getRegionsOfTableForDeleting(TableName table) { + return getTableRegionStateNodes(table).stream().map(RegionStateNode::getRegionInfo) + .collect(Collectors.toList()); + } + /** * @return Return the regions of the table and filter them. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java index b0a697deaa97..63bb345cffed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java @@ -348,6 +348,7 @@ protected Flow executeFromState(MasterProcedureEnv env, RegionStateTransitionSta LOG.error( "Cannot assign replica region {} because its primary region {} does not exist.", regionNode.getRegionInfo(), defaultRI); + regionNode.unsetProcedure(this); return Flow.NO_MORE_STATE; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 9cfce0ce3632..80dddc7ccda1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -99,7 +99,8 @@ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState s // TODO: Move out...
in the acquireLock() LOG.debug("Waiting for RIT for {}", this); - regions = env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName()); + regions = env.getAssignmentManager().getRegionStates() + .getRegionsOfTableForDeleting(getTableName()); assert regions != null && !regions.isEmpty() : "unexpected 0 regions"; ProcedureSyncWait.waitRegionInTransition(env, regions); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java index 5ed100f6d296..e8c016777283 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java @@ -80,4 +80,40 @@ public void testCloneSnapshotAfterSplittingRegion() throws IOException, Interrup admin.catalogJanitorSwitch(true); } } + + @Test + public void testCloneSnapshotBeforeSplittingRegionAndDroppingTable() + throws IOException, InterruptedException { + // Turn off the CatalogJanitor + admin.catalogJanitorSwitch(false); + + try { + // Take a snapshot + admin.snapshot(snapshotName2, tableName); + + // Clone the snapshot to another table + TableName clonedTableName = + TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis()); + admin.cloneSnapshot(snapshotName2, clonedTableName); + SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName); + + // Split the first region of the original table + List regionInfos = admin.getRegions(tableName); + RegionReplicaUtil.removeNonDefaultRegions(regionInfos); + splitRegion(regionInfos.get(0)); + + // Drop the original table + admin.disableTable(tableName); + admin.deleteTable(tableName); + + // Disable and enable the cloned table. 
This should be successful + admin.disableTable(clonedTableName); + admin.enableTable(clonedTableName); + SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName); + + verifyRowCount(TEST_UTIL, clonedTableName, snapshot1Rows); + } finally { + admin.catalogJanitorSwitch(true); + } + } } From 4fdf2fb1271df66585a7f086e1de04b526e5f15b Mon Sep 17 00:00:00 2001 From: Reid Chan Date: Sun, 25 Oct 2020 17:46:14 +0800 Subject: [PATCH 463/769] HBASE-25189 [Metrics] Add checkAndPut and checkAndDelete latency metrics at table level (#2549) Signed-off-by: Viraj Jasani --- .../regionserver/MetricsTableLatencies.java | 25 +++++++++++++ .../MetricsTableLatenciesImpl.java | 36 +++++++++++++++++++ .../regionserver/MetricsRegionServer.java | 15 ++++++-- .../hbase/regionserver/RSRpcServices.java | 9 +++-- .../RegionServerTableMetrics.java | 12 +++++++ .../regionserver/TestMetricsRegionServer.java | 17 ++------- 6 files changed, 94 insertions(+), 20 deletions(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java index 231bad1be879..2aeb82b0d64d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java @@ -53,6 +53,9 @@ public interface MetricsTableLatencies { String DELETE_BATCH_TIME = "deleteBatchTime"; String INCREMENT_TIME = "incrementTime"; String APPEND_TIME = "appendTime"; + String CHECK_AND_DELETE_TIME = "checkAndDeleteTime"; + String CHECK_AND_PUT_TIME = "checkAndPutTime"; + String CHECK_AND_MUTATE_TIME = "checkAndMutateTime"; /** * Update the Put time histogram @@ -125,4 +128,26 @@ public interface MetricsTableLatencies { * @param t time it took */ void updateScanTime(String tableName, long t); + + /** + * Update the CheckAndDelete time histogram. + * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndDelete(String nameAsString, long time); + + /** + * Update the CheckAndPut time histogram. + * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndPut(String nameAsString, long time); + + /** + * Update the CheckAndMutate time histogram. 
+ * @param nameAsString The table the metric is for + * @param time time it took + */ + void updateCheckAndMutate(String nameAsString, long time); + } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index 5a3f3b9d2491..5e13a614ff0c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -47,6 +47,9 @@ public static class TableHistograms { final MetricHistogram deleteBatchTimeHisto; final MetricHistogram scanTimeHisto; final MetricHistogram scanSizeHisto; + final MetricHistogram checkAndDeleteTimeHisto; + final MetricHistogram checkAndPutTimeHisto; + final MetricHistogram checkAndMutateTimeHisto; TableHistograms(DynamicMetricsRegistry registry, TableName tn) { getTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, GET_TIME)); @@ -60,6 +63,12 @@ public static class TableHistograms { qualifyMetricsName(tn, DELETE_BATCH_TIME)); scanTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, SCAN_TIME)); scanSizeHisto = registry.newSizeHistogram(qualifyMetricsName(tn, SCAN_SIZE)); + checkAndDeleteTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); + checkAndPutTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); + checkAndMutateTimeHisto = + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); } public void updatePut(long time) { @@ -97,6 +106,18 @@ public void updateScanSize(long scanSize) { public void updateScanTime(long t) { scanTimeHisto.add(t); } + + public void updateCheckAndDeleteTime(long t) { + checkAndDeleteTimeHisto.add(t); + } + + public void updateCheckAndPutTime(long t) { + checkAndPutTimeHisto.add(t); + } + + public void updateCheckAndMutateTime(long t) { + checkAndMutateTimeHisto.add(t); + } } @VisibleForTesting @@ -174,6 +195,21 @@ public void updateScanTime(String tableName, long t) { getOrCreateTableHistogram(tableName).updateScanTime(t); } + @Override + public void updateCheckAndDelete(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndDeleteTime(time); + } + + @Override + public void updateCheckAndPut(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndPutTime(time); + } + + @Override + public void updateCheckAndMutate(String tableName, long time) { + getOrCreateTableHistogram(tableName).updateCheckAndMutateTime(time); + } + @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 715da6c47bd8..e37a2722c9f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -145,15 +145,24 @@ public void updateDeleteBatch(TableName tn, long t) { serverSource.updateDeleteBatch(t); } - public void updateCheckAndDelete(long t) { + public void updateCheckAndDelete(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + 
tableMetrics.updateCheckAndDelete(tn, t); + } serverSource.updateCheckAndDelete(t); } - public void updateCheckAndPut(long t) { + public void updateCheckAndPut(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateCheckAndPut(tn, t); + } serverSource.updateCheckAndPut(t); } - public void updateCheckAndMutate(long t) { + public void updateCheckAndMutate(TableName tn, long t) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateCheckAndMutate(tn, t); + } serverSource.updateCheckAndMutate(t); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index a59f5e609b17..d7ba9fc8a289 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3076,15 +3076,18 @@ private CheckAndMutateResult checkAndMutate(HRegion region, OperationQuota quota MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); if (metricsRegionServer != null) { long after = EnvironmentEdgeManager.currentTime(); - metricsRegionServer.updateCheckAndMutate(after - before); + metricsRegionServer.updateCheckAndMutate( + region.getRegionInfo().getTable(), after - before); MutationType type = mutation.getMutateType(); switch (type) { case PUT: - metricsRegionServer.updateCheckAndPut(after - before); + metricsRegionServer.updateCheckAndPut( + region.getRegionInfo().getTable(), after - before); break; case DELETE: - metricsRegionServer.updateCheckAndDelete(after - before); + metricsRegionServer.updateCheckAndDelete( + region.getRegionInfo().getTable(), after - before); break; default: break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java index ec6c0493bb75..812ae45e8840 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java @@ -65,6 +65,18 @@ public void updateDeleteBatch(TableName table, long time) { latencies.updateDeleteBatch(table.getNameAsString(), time); } + public void updateCheckAndDelete(TableName table, long time) { + latencies.updateCheckAndDelete(table.getNameAsString(), time); + } + + public void updateCheckAndPut(TableName table, long time) { + latencies.updateCheckAndPut(table.getNameAsString(), time); + } + + public void updateCheckAndMutate(TableName table, long time) { + latencies.updateCheckAndMutate(table.getNameAsString(), time); + } + public void updateScanTime(TableName table, long time) { latencies.updateScanTime(table.getNameAsString(), time); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java index 574b1e4130c8..e56eb0f20aaa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -151,9 +151,9 @@ public void testSlowCount() { for (int i=0; i < 17; i ++) { rsm.updatePut(null, 17); rsm.updateDelete(null, 17); - rsm.updateCheckAndDelete(17); - rsm.updateCheckAndPut(17); - rsm.updateCheckAndMutate(17); 
+ rsm.updateCheckAndDelete(null, 17); + rsm.updateCheckAndPut(null, 17); + rsm.updateCheckAndMutate(null, 17); } HELPER.assertCounter("appendNumOps", 24, serverSource); @@ -174,17 +174,6 @@ public void testSlowCount() { HELPER.assertCounter("slowPutCount", 16, serverSource); } - String FLUSH_TIME = "flushTime"; - String FLUSH_TIME_DESC = "Histogram for the time in millis for memstore flush"; - String FLUSH_MEMSTORE_SIZE = "flushMemstoreSize"; - String FLUSH_MEMSTORE_SIZE_DESC = "Histogram for number of bytes in the memstore for a flush"; - String FLUSH_FILE_SIZE = "flushFileSize"; - String FLUSH_FILE_SIZE_DESC = "Histogram for number of bytes in the resulting file for a flush"; - String FLUSHED_OUTPUT_BYTES = "flushedOutputBytes"; - String FLUSHED_OUTPUT_BYTES_DESC = "Total number of bytes written from flush"; - String FLUSHED_MEMSTORE_BYTES = "flushedMemstoreBytes"; - String FLUSHED_MEMSTORE_BYTES_DESC = "Total number of bytes of cells in memstore from flush"; - @Test public void testFlush() { rsm.updateFlush(null, 1, 2, 3); From af4fbcf91277a899648a7de0fc8f6108d3b72ab0 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 26 Oct 2020 10:03:43 +0800 Subject: [PATCH 464/769] HBASE-25215 TestClientOperationTimeout.testScanTimeout is flaky (#2583) Signed-off-by: Guanghao Zhang --- .../hbase/regionserver/RSRpcServices.java | 2 +- .../hbase/TestClientOperationTimeout.java | 95 +++++++++++-------- 2 files changed, 55 insertions(+), 42 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index d7ba9fc8a289..e15e8e9c1753 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -271,7 +271,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, AdminService.BlockingInterface, ClientService.BlockingInterface, PriorityFunction, ConfigurationObserver { - protected static final Logger LOG = LoggerFactory.getLogger(RSRpcServices.class); + private static final Logger LOG = LoggerFactory.getLogger(RSRpcServices.class); /** RPC scheduler to use for the region server. 
*/ public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java index 10a3cb73db6d..eb62c973a655 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hbase; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; + import java.io.IOException; import java.net.SocketTimeoutException; -import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -27,12 +31,12 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.ipc.CallTimeoutException; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RSRpcServices; @@ -40,12 +44,13 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; @@ -69,6 +74,8 @@ @Category({ ClientTests.class, MediumTests.class }) public class TestClientOperationTimeout { + private static final Logger LOG = LoggerFactory.getLogger(TestClientOperationTimeout.class); + @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestClientOperationTimeout.class); @@ -91,7 +98,7 @@ public class TestClientOperationTimeout { private static Table TABLE; @BeforeClass - public static void setUpClass() throws Exception { + public static void setUp() throws Exception { // Set RegionServer class and use default values for other options. 
StartMiniClusterOption option = StartMiniClusterOption.builder().rsClass(DelayedRegionServer.class).build(); @@ -108,14 +115,6 @@ public static void setUpClass() throws Exception { TABLE = CONN.getTable(TABLE_NAME); } - @Before - public void setUp() throws Exception { - DELAY_GET = 0; - DELAY_SCAN = 0; - DELAY_MUTATE = 0; - DELAY_BATCH_MUTATE = 0; - } - @AfterClass public static void tearDown() throws Exception { Closeables.close(TABLE, true); @@ -123,6 +122,14 @@ public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } + @Before + public void setUpBeforeTest() throws Exception { + DELAY_GET = 0; + DELAY_SCAN = 0; + DELAY_MUTATE = 0; + DELAY_BATCH_MUTATE = 0; + } + /** * Tests that a get on a table throws {@link RetriesExhaustedException} when the operation takes * longer than 'hbase.client.operation.timeout'. @@ -132,10 +139,11 @@ public void testGetTimeout() { DELAY_GET = 600; try { TABLE.get(new Get(ROW)); - Assert.fail("should not reach here"); + fail("should not reach here"); } catch (Exception e) { - Assert.assertTrue( - e instanceof RetriesExhaustedException && e.getCause() instanceof CallTimeoutException); + LOG.info("Got exception for get", e); + assertThat(e, instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause(), instanceOf(CallTimeoutException.class)); } } @@ -150,10 +158,11 @@ public void testPutTimeout() { put.addColumn(FAMILY, QUALIFIER, VALUE); try { TABLE.put(put); - Assert.fail("should not reach here"); + fail("should not reach here"); } catch (Exception e) { - Assert.assertTrue( - e instanceof RetriesExhaustedException && e.getCause() instanceof CallTimeoutException); + LOG.info("Got exception for put", e); + assertThat(e, instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause(), instanceOf(CallTimeoutException.class)); } } @@ -164,20 +173,17 @@ public void testPutTimeout() { @Test public void testMultiPutsTimeout() { DELAY_BATCH_MUTATE = 600; - Put put1 = new Put(ROW); - put1.addColumn(FAMILY, QUALIFIER, VALUE); - Put put2 = new Put(ROW); - put2.addColumn(FAMILY, QUALIFIER, VALUE); - List puts = new ArrayList<>(); - puts.add(put1); - puts.add(put2); + Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + Put put2 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + List puts = Arrays.asList(put1, put2); try { TABLE.batch(puts, new Object[2]); - Assert.fail("should not reach here"); + fail("should not reach here"); } catch (Exception e) { - Assert.assertTrue( - e instanceof RetriesExhaustedException && e.getCause() instanceof RetriesExhaustedException - && e.getCause().getCause() instanceof CallTimeoutException); + LOG.info("Got exception for batch", e); + assertThat(e, instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause(), instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause().getCause(), instanceOf(CallTimeoutException.class)); } } @@ -186,19 +192,26 @@ public void testMultiPutsTimeout() { * longer than 'hbase.client.scanner.timeout.period'. */ @Test - public void testScanTimeout() { + public void testScanTimeout() throws IOException, InterruptedException { + // cache the region location. + try (RegionLocator locator = TABLE.getRegionLocator()) { + locator.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY); + } + // sleep a bit to make sure the location has been cached as it is an async operation. 
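      // (An aside, not part of the diff: with the location uncached, the injected
      // delay can hit the meta lookup rather than the scan RPC itself, so the failure
      // may surface as a different exception than the CallTimeoutException asserted
      // below; pre-caching the location is the likely fix for that flakiness.)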
+ Thread.sleep(100); DELAY_SCAN = 600; - try { - ResultScanner scanner = TABLE.getScanner(new Scan()); + try (ResultScanner scanner = TABLE.getScanner(new Scan())) { scanner.next(); - Assert.fail("should not reach here"); + fail("should not reach here"); } catch (Exception e) { - Assert.assertTrue( - e instanceof RetriesExhaustedException && e.getCause() instanceof TimeoutIOException); + LOG.info("Got exception for scan", e); + assertThat(e, instanceOf(RetriesExhaustedException.class)); + assertThat(e.getCause(), instanceOf(CallTimeoutException.class)); } } - private static class DelayedRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer { + public static final class DelayedRegionServer + extends MiniHBaseCluster.MiniHBaseClusterRegionServer { public DelayedRegionServer(Configuration conf) throws IOException, InterruptedException { super(conf); } @@ -212,14 +225,14 @@ protected RSRpcServices createRpcServices() throws IOException { /** * This {@link RSRpcServices} class injects delay for Rpc calls and after executes super methods. */ - public static class DelayedRSRpcServices extends RSRpcServices { + private static final class DelayedRSRpcServices extends RSRpcServices { DelayedRSRpcServices(HRegionServer rs) throws IOException { super(rs); } @Override public ClientProtos.GetResponse get(RpcController controller, ClientProtos.GetRequest request) - throws ServiceException { + throws ServiceException { try { Thread.sleep(DELAY_GET); } catch (InterruptedException e) { @@ -230,7 +243,7 @@ public ClientProtos.GetResponse get(RpcController controller, ClientProtos.GetRe @Override public ClientProtos.MutateResponse mutate(RpcController rpcc, - ClientProtos.MutateRequest request) throws ServiceException { + ClientProtos.MutateRequest request) throws ServiceException { try { Thread.sleep(DELAY_MUTATE); } catch (InterruptedException e) { @@ -241,7 +254,7 @@ public ClientProtos.MutateResponse mutate(RpcController rpcc, @Override public ClientProtos.ScanResponse scan(RpcController controller, - ClientProtos.ScanRequest request) throws ServiceException { + ClientProtos.ScanRequest request) throws ServiceException { try { Thread.sleep(DELAY_SCAN); } catch (InterruptedException e) { @@ -252,7 +265,7 @@ public ClientProtos.ScanResponse scan(RpcController controller, @Override public ClientProtos.MultiResponse multi(RpcController rpcc, ClientProtos.MultiRequest request) - throws ServiceException { + throws ServiceException { try { Thread.sleep(DELAY_BATCH_MUTATE); } catch (InterruptedException e) { From 066390fec6f2c6710e43ec8a229d326f88fce4c4 Mon Sep 17 00:00:00 2001 From: WenFeiYi Date: Tue, 27 Oct 2020 22:01:57 +0800 Subject: [PATCH 465/769] HBASE-25173 Remove owner related methods in TableDescriptor/TableDescriptorBuilder (#2541) Signed-off-by: Duo Zhang --- .../hadoop/hbase/client/TableDescriptor.java | 7 --- .../hbase/client/TableDescriptorBuilder.java | 59 ------------------- .../hbase/coprocessor/TestSecureExport.java | 14 ++--- .../security/access/AccessController.java | 8 +-- .../SnapshotScannerHDFSAclController.java | 3 +- .../hadoop/hbase/HBaseTestingUtility.java | 26 +++++++- .../hbase/client/SnapshotWithAclTestBase.java | 13 ++-- .../hbase/rsgroup/TestRSGroupsWithACL.java | 6 +- .../hbase/security/access/SecureTestUtil.java | 12 ++++ .../security/access/TestAccessController.java | 34 +++++------ .../access/TestAccessController3.java | 8 ++- .../TestCellACLWithMultipleVersions.java | 13 ++-- .../hbase/security/access/TestCellACLs.java | 10 ++-- 
.../security/access/TestHDFSAclHelper.java | 29 +++++---- .../access/TestScanEarlyTermination.java | 9 +-- .../access/TestWithDisabledAuthorization.java | 10 ++-- hbase-shell/src/main/ruby/hbase/admin.rb | 3 +- .../src/main/ruby/shell/commands/alter.rb | 2 +- .../src/main/ruby/shell/commands/create.rb | 2 +- hbase-shell/src/test/ruby/hbase/admin_test.rb | 7 +-- 20 files changed, 123 insertions(+), 152 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java index a4523872c9c5..1440c28787d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java @@ -177,13 +177,6 @@ public interface TableDescriptor { */ TableName getTableName(); - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - String getOwnerString(); - /** * Get the region server group this table belongs to. The regions of this table will be placed * only on the region servers within this group. If not present, will be placed on diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 1328f7d017e2..c611a217960d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -71,12 +70,6 @@ public class TableDescriptorBuilder { private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE)); - @InterfaceAudience.Private - public static final String OWNER = "OWNER"; - @InterfaceAudience.Private - public static final Bytes OWNER_KEY - = new Bytes(Bytes.toBytes(OWNER)); - /** * Used by rest interface to access this metadata attribute * which denotes if the table is Read Only. @@ -485,26 +478,6 @@ public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) { return this; } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - public TableDescriptorBuilder setOwner(User owner) { - desc.setOwner(owner); - return this; - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - public TableDescriptorBuilder setOwnerString(String ownerString) { - desc.setOwnerString(ownerString); - return this; - } - public TableDescriptorBuilder setPriority(int priority) { desc.setPriority(priority); return this; @@ -1550,38 +1523,6 @@ public void removeCoprocessor(String className) { } } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Deprecated - public ModifyableTableDescriptor setOwner(User owner) { - return setOwnerString(owner != null ? owner.getShortName() : null); - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - // used by admin.rb:alter(table_name,*args) to update owner. 
- @Deprecated - public ModifyableTableDescriptor setOwnerString(String ownerString) { - return setValue(OWNER_KEY, ownerString); - } - - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. - * @see HBASE-15583 - */ - @Override - @Deprecated - public String getOwnerString() { - // Note that every table should have an owner (i.e. should have OWNER_KEY set). - // hbase:meta should return system user as owner, not null (see - // MasterFileSystem.java:bootstrap()). - return getOrDefault(OWNER_KEY, Function.identity(), null); - } - /** * @return the bytes in pb format */ diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index 2f5024737dbc..d3be45b56f68 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -216,6 +216,7 @@ public static void beforeClass() throws Exception { Permission.Action.EXEC, Permission.Action.READ, Permission.Action.WRITE); + SecureTestUtil.grantGlobal(UTIL, USER_OWNER, Permission.Action.CREATE); addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER), Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); } @@ -236,11 +237,11 @@ public static void afterClass() throws Exception { public void testAccessCase() throws Throwable { final String exportTable = name.getMethodName(); TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) + .newBuilder(TableName.valueOf(exportTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); + User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); SecureTestUtil.grantOnTable(UTIL, USER_RO, TableName.valueOf(exportTable), null, null, Permission.Action.READ); @@ -340,9 +341,9 @@ public void testVisibilityLabels() throws IOException, Throwable { final TableDescriptor exportHtd = TableDescriptorBuilder .newBuilder(TableName.valueOf(exportTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); + User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); AccessTestAction putAction = () -> { Put p1 = new Put(ROW1); p1.addColumn(FAMILYA, QUAL, NOW, QUAL); @@ -398,9 +399,8 @@ public void testVisibilityLabels() throws IOException, Throwable { final TableDescriptor importHtd = TableDescriptorBuilder .newBuilder(TableName.valueOf(importTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)) - .setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, importHtd, new byte[][]{Bytes.toBytes("s")}); + SecureTestUtil.createTable(UTIL, owner, importHtd, new byte[][]{Bytes.toBytes("s")}); AccessTestAction importAction = () -> { String[] args = new String[]{ "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 3779903f869a..3a6c3aae657b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -804,10 +804,7 @@ public void postCompletedCreateTableAction( + PermissionStorage.ACL_TABLE_NAME + " is not yet created. " + getClass().getSimpleName() + " should be configured as the first Coprocessor"); } else { - String owner = desc.getOwnerString(); - // default the table owner to current user, if not specified. - if (owner == null) - owner = getActiveUser(c).getShortName(); + String owner = getActiveUser(c).getShortName(); final UserPermission userPermission = new UserPermission(owner, Permission.newBuilder(desc.getTableName()).withActions(Action.values()).build()); // switch to the real hbase master user for doing the RPC on the ACL table @@ -906,8 +903,7 @@ public void postModifyTable(ObserverContext c, Tab TableDescriptor oldDesc, TableDescriptor currentDesc) throws IOException { final Configuration conf = c.getEnvironment().getConfiguration(); // default the table owner to current user, if not specified. - final String owner = (currentDesc.getOwnerString() != null) ? currentDesc.getOwnerString() : - getActiveUser(c).getShortName(); + final String owner = getActiveUser(c).getShortName(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java index 5c4ba0d68505..e52134e7d065 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java @@ -179,8 +179,7 @@ public void postCompletedCreateTableAction(ObserverContext perms = admin.getUserPermissions(GetUserPermissionsRequest.newBuilder(tableName).build()); @@ -1724,13 +1726,9 @@ public void testPermissionList() throws Exception { assertFalse("User should not be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); - // disable table before modification - admin.disableTable(tableName); - User newOwner = User.createUserForTesting(conf, "new_owner", new String[] {}); - tableDescriptor = - TableDescriptorBuilder.newBuilder(tableDescriptor).setOwner(newOwner).build(); - admin.modifyTable(tableDescriptor); + grantOnTable(TEST_UTIL, newOwner.getShortName(), tableName, + null, null, Permission.Action.values()); perms = admin.getUserPermissions(GetUserPermissionsRequest.newBuilder(tableName).build()); UserPermission newOwnerperm = new UserPermission(newOwner.getName(), @@ -1758,7 +1756,7 @@ public void testGlobalPermissionList() throws Exception { new UserPermission(user, Permission.newBuilder().withActions(Action.values()).build())); } assertTrue("Only super users, global users and user admin has permission on table hbase:acl " + - "per setup", perms.size() == 5 + superUsers.size() && + "per setup", perms.size() == 6 + superUsers.size() && hasFoundUserPermission(adminPerms, perms)); } @@ -2278,8 +2276,8 @@ private void createTestTable(TableName tname) throws Exception { private void createTestTable(TableName tname, byte[] cf) throws Exception { TableDescriptor 
tableDescriptor = TableDescriptorBuilder.newBuilder(tname) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cf).setMaxVersions(100).build()) - .setOwner(USER_OWNER).build(); - createTable(TEST_UTIL, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); + .build(); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); } @Test @@ -2858,7 +2856,7 @@ public void testAccessControllerUserPermsRegexHandling() throws Exception { // Verify that we can read sys-tables String aclTableName = PermissionStorage.ACL_TABLE_NAME.getNameAsString(); - assertEquals(5, SUPERUSER.runAs(getPrivilegedAction(aclTableName)).size()); + assertEquals(6, SUPERUSER.runAs(getPrivilegedAction(aclTableName)).size()); assertEquals(0, testRegexHandler.runAs(getPrivilegedAction(aclTableName)).size()); // Grant TABLE ADMIN privs to testUserPerms @@ -3517,10 +3515,10 @@ public Object run() throws Exception { // Validate global user permission List userPermissions; - assertEquals(5 + superUserCount, AccessControlClient.getUserPermissions(conn, null).size()); - assertEquals(5 + superUserCount, + assertEquals(6 + superUserCount, AccessControlClient.getUserPermissions(conn, null).size()); + assertEquals(6 + superUserCount, AccessControlClient.getUserPermissions(conn, HConstants.EMPTY_STRING).size()); - assertEquals(5 + superUserCount, + assertEquals(6 + superUserCount, AccessControlClient.getUserPermissions(conn, null, HConstants.EMPTY_STRING).size()); userPermissions = AccessControlClient.getUserPermissions(conn, null, USER_ADMIN.getName()); verifyGetUserPermissionResult(userPermissions, 1, null, null, USER_ADMIN.getName(), superUsers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java index 53e07ff8101e..8860d5f06587 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java @@ -189,6 +189,9 @@ public static void setupBeforeClass() throws Exception { USER_GROUP_WRITE = User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE }); + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Permission.Action.CREATE); + systemUserConnection = TEST_UTIL.getConnection(); setUpTableAndUserPermissions(); } @@ -207,9 +210,8 @@ public static void tearDownAfterClass() throws Exception { private static void setUpTableAndUserPermissions() throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()) - .setOwner(USER_OWNER).build(); - createTable(TEST_UTIL, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build(); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0); RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java index 1d3a54e13f6d..f3035a96cc4a 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -125,6 +124,9 @@ public static void setupBeforeClass() throws Exception { GROUP_USER = User.createUserForTesting(conf, "group_user", new String[] { GROUP }); usersAndGroups = new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }; + + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Action.CREATE); } @AfterClass @@ -138,14 +140,9 @@ public void setUp() throws Exception { .setColumnFamily( ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY1).setMaxVersions(4).build()) .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY2).setMaxVersions(4).build()) - .setOwner(USER_OWNER).build(); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY2).setMaxVersions(4).build()).build(); // Create the test table (owner added to the _acl_ table) - try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - try (Admin admin = connection.getAdmin()) { - admin.createTable(tableDescriptor, new byte[][] { Bytes.toBytes("s") }); - } - } + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitTableEnabled(testTable.getTableName()); LOG.info("Sleeping a second because of HBASE-12581"); Threads.sleep(1000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java index 1515e1410a59..6d238284cdd4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -127,6 +126,9 @@ public static void setupBeforeClass() throws Exception { GROUP_USER = User.createUserForTesting(conf, "group_user", new String[] { GROUP }); usersAndGroups = new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }; + + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Action.CREATE); } @AfterClass @@ -137,12 +139,10 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { // Create the test table (owner added to the _acl_ table) - Admin admin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()) .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(4).build()) - .setOwner(USER_OWNER).build(); - 
admin.createTable(tableDescriptor, new byte[][] { Bytes.toBytes("s") }); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(4).build()).build(); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitTableEnabled(testTable.getTableName()); LOG.info("Sleeping a second because of HBASE-12581"); Threads.sleep(1000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java index 420fb977bf4f..e4e37e08ba7c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java @@ -39,6 +39,8 @@ final class TestHDFSAclHelper { private static final Logger LOG = LoggerFactory.getLogger(TestHDFSAclHelper.class); + private static final String USER_OWNER = "owner"; + private TestHDFSAclHelper() { } @@ -55,33 +57,41 @@ static void createNamespace(HBaseTestingUtility util, String namespace) throws I } } - static Table createTable(HBaseTestingUtility util, TableName tableName) throws IOException { + static Table createTable(HBaseTestingUtility util, TableName tableName) throws Exception { createNamespace(util, tableName.getNamespaceAsString()); TableDescriptor td = getTableDescriptorBuilder(util, tableName) .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true").build(); byte[][] splits = new byte[][] { Bytes.toBytes("2"), Bytes.toBytes("4") }; - return util.createTable(td, splits); + User user = User.createUserForTesting(util.getConfiguration(), USER_OWNER, new String[] {}); + SecureTestUtil.grantGlobal(util, user.getShortName(), Permission.Action.CREATE); + SecureTestUtil.createTable(util, user, td, splits); + return util.getConnection().getTable(tableName); } - static Table createMobTable(HBaseTestingUtility util, TableName tableName) throws IOException { + static Table createMobTable(HBaseTestingUtility util, TableName tableName) throws Exception { createNamespace(util, tableName.getNamespaceAsString()); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN1).setMobEnabled(true) .setMobThreshold(0).build()) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN2).setMobEnabled(true) .setMobThreshold(0).build()) - .setOwner(User.createUserForTesting(util.getConfiguration(), "owner", new String[] {})) .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true").build(); byte[][] splits = new byte[][] { Bytes.toBytes("2"), Bytes.toBytes("4") }; - return util.createTable(td, splits); + User user = User.createUserForTesting(util.getConfiguration(), USER_OWNER, new String[] {}); + SecureTestUtil.grantGlobal(util, user.getShortName(), Permission.Action.CREATE); + SecureTestUtil.createTable(util, user, td, splits); + return util.getConnection().getTable(tableName); } static TableDescriptor createUserScanSnapshotDisabledTable(HBaseTestingUtility util, - TableName tableName) throws IOException { + TableName tableName) throws Exception { createNamespace(util, tableName.getNamespaceAsString()); TableDescriptor td = getTableDescriptorBuilder(util, tableName).build(); byte[][] splits = new byte[][] { Bytes.toBytes("2"), Bytes.toBytes("4") }; - try (Table t = util.createTable(td, splits)) { + User user = User.createUserForTesting(util.getConfiguration(), USER_OWNER, new 
String[] {}); + SecureTestUtil.grantGlobal(util, user.getShortName(), Permission.Action.CREATE); + SecureTestUtil.createTable(util, user, td, splits); + try (Table t = util.getConnection().getTable(tableName)) { put(t); } return td; @@ -91,11 +101,10 @@ static TableDescriptorBuilder getTableDescriptorBuilder(HBaseTestingUtility util TableName tableName) { return TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN1).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN2).build()) - .setOwner(User.createUserForTesting(util.getConfiguration(), "owner", new String[] {})); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN2).build()); } - static void createTableAndPut(HBaseTestingUtility util, TableName tableNam) throws IOException { + static void createTableAndPut(HBaseTestingUtility util, TableName tableNam) throws Exception { try (Table t = createTable(util, tableNam)) { put(t); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java index f8ac4f696aee..aade90ca6157 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -107,6 +106,9 @@ public static void setupBeforeClass() throws Exception { // create a set of test users USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]); USER_OTHER = User.createUserForTesting(conf, "other", new String[0]); + + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Action.CREATE); } @AfterClass @@ -116,9 +118,8 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { - Admin admin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(testTable.getTableName()).setOwner(USER_OWNER) + TableDescriptorBuilder.newBuilder(testTable.getTableName()) .setColumnFamily( ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY1).setMaxVersions(10).build()) .setColumnFamily( @@ -127,7 +128,7 @@ public void setUp() throws Exception { // want to confirm that the per-table configuration is properly picked up. 
.setValue(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, "true").build(); - admin.createTable(tableDescriptor); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor); TEST_UTIL.waitUntilAllRegionsAssigned(testTable.getTableName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java index 47458f3dd372..a08456a8917a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; @@ -167,6 +166,9 @@ public static void setupBeforeClass() throws Exception { USER_RO = User.createUserForTesting(conf, "rouser", new String[0]); USER_QUAL = User.createUserForTesting(conf, "rwpartial", new String[0]); USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]); + + // Grant table creation permission to USER_OWNER + grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Action.CREATE); } @AfterClass @@ -177,12 +179,10 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { // Create the test table (owner added to the _acl_ table) - Admin admin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()) .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()) - .setOwner(USER_OWNER).build(); - admin.createTable(tableDescriptor, new byte[][] { Bytes.toBytes("s") }); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build(); + createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitUntilAllRegionsAssigned(testTable.getTableName()); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(testTable.getTableName()).get(0); diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index a91b273033c9..d3492fa7c1c5 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1470,8 +1470,8 @@ def list_locks end # Parse arguments and update TableDescriptorBuilder accordingly + # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity def update_tdb_from_arg(tdb, arg) - tdb.setOwnerString(arg.delete(TableDescriptorBuilder::OWNER)) if arg.include?(TableDescriptorBuilder::OWNER) tdb.setMaxFileSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MAX_FILESIZE))) if arg.include?(TableDescriptorBuilder::MAX_FILESIZE) tdb.setReadOnly(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::READONLY))) if arg.include?(TableDescriptorBuilder::READONLY) tdb.setCompactionEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::COMPACTION_ENABLED))) if arg.include?(TableDescriptorBuilder::COMPACTION_ENABLED) @@ -1490,6 +1490,7 @@ def update_tdb_from_arg(tdb, arg) set_user_metadata(tdb, arg.delete(METADATA)) if arg[METADATA] set_descriptor_config(tdb, arg.delete(CONFIGURATION)) 
if arg[CONFIGURATION] end + # rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity #---------------------------------------------------------------------------------------------- # clear compaction queues diff --git a/hbase-shell/src/main/ruby/shell/commands/alter.rb b/hbase-shell/src/main/ruby/shell/commands/alter.rb index 456d6d5dbd12..22e6e42e69c3 100644 --- a/hbase-shell/src/main/ruby/shell/commands/alter.rb +++ b/hbase-shell/src/main/ruby/shell/commands/alter.rb @@ -95,7 +95,7 @@ def help hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 }, { MAX_FILESIZE => '134217728' }, { METHOD => 'delete', NAME => 'f2' }, - OWNER => 'johndoe', METADATA => { 'mykey' => 'myvalue' } + METADATA => { 'mykey' => 'myvalue' } EOF end diff --git a/hbase-shell/src/main/ruby/shell/commands/create.rb b/hbase-shell/src/main/ruby/shell/commands/create.rb index b82b2bfc346d..897e8a744b9f 100644 --- a/hbase-shell/src/main/ruby/shell/commands/create.rb +++ b/hbase-shell/src/main/ruby/shell/commands/create.rb @@ -45,7 +45,7 @@ def help hbase> create 'ns1:t1', 'f1', SPLITS => ['10', '20', '30', '40'] hbase> create 't1', 'f1', SPLITS => ['10', '20', '30', '40'] - hbase> create 't1', 'f1', SPLITS_FILE => 'splits.txt', OWNER => 'johndoe' + hbase> create 't1', 'f1', SPLITS_FILE => 'splits.txt' hbase> create 't1', {NAME => 'f1', VERSIONS => 5}, METADATA => { 'mykey' => 'myvalue' } hbase> # Optionally pre-split the table into NUMREGIONS, using hbase> # SPLITALGO ("HexStringSplit", "UniformSplit" or classname) diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index fac52ede51b7..64a4a8b425c6 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -426,7 +426,7 @@ def teardown define_test "create should fail without columns when called with options" do drop_test_table(@create_test_name) assert_raise(ArgumentError) do - command(:create, @create_test_name, { OWNER => 'a' }) + command(:create, @create_test_name, { VERSIONS => '1' }) end end @@ -460,7 +460,6 @@ def teardown define_test "create should be able to set table options" do drop_test_table(@create_test_name) command(:create, @create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, - OWNER => '987654321', PRIORITY => '77', FLUSH_POLICY => 'org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy', REGION_MEMSTORE_REPLICATION => 'TRUE', @@ -470,7 +469,6 @@ def teardown MERGE_ENABLED => 'false') assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) assert_match(/12345678/, admin.describe(@create_test_name)) - assert_match(/987654321/, admin.describe(@create_test_name)) assert_match(/77/, admin.describe(@create_test_name)) assert_match(/'COMPACTION_ENABLED' => 'false'/, admin.describe(@create_test_name)) assert_match(/'SPLIT_ENABLED' => 'false'/, admin.describe(@create_test_name)) @@ -484,9 +482,8 @@ def teardown define_test "create should ignore table_att" do drop_test_table(@create_test_name) - command(:create, @create_test_name, 'a', 'b', METHOD => 'table_att', OWNER => '987654321') + command(:create, @create_test_name, 'a', 'b', METHOD => 'table_att') assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) - assert_match(/987654321/, admin.describe(@create_test_name)) end define_test "create should work with SPLITALGO" do From 50d50a59869d3ad8d07805bbc61aab3bcb3e736f Mon Sep 17 00:00:00 2001 From: niuyulin Date: Wed, 28 Oct 2020 14:52:27 +0800 Subject: [PATCH 466/769] 
HBASE-25176 MasterStoppedException should be moved to hbase-client module (#2538) Signed-off-by: Duo Zhang --- .../exceptions/MasterStoppedException.java | 34 +++++++++++++++++++ .../apache/hadoop/hbase/master/HMaster.java | 7 +--- 2 files changed, 35 insertions(+), 6 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java new file mode 100644 index 000000000000..1ed5b55410ff --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MasterStoppedException.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.exceptions; + +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Thrown when the master is stopped + */ +@InterfaceAudience.Private +public class MasterStoppedException extends DoNotRetryIOException { + + private static final long serialVersionUID = -4284604435898100365L; + + public MasterStoppedException() { + super(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 575feae4c75f..86d3983c5677 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.exceptions.MasterStoppedException; import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.http.InfoServer; @@ -2853,12 +2854,6 @@ protected void checkServiceStarted() throws ServerNotRunningYetException { } } - public static class MasterStoppedException extends DoNotRetryIOException { - MasterStoppedException() { - super(); - } - } - void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException, MasterNotRunningException, MasterStoppedException { checkServiceStarted(); From 1ac12dd17209bf0bc7bb93fcf8fdcae7b363ed3c Mon Sep 17 00:00:00 2001 From: niuyulin Date: Wed, 28 Oct 2020 14:58:28 +0800 Subject: [PATCH 467/769] HBASE-25201 YouAreDeadException should be moved to hbase-server module (#2581) Signed-off-by: Duo Zhang --- .../java/org/apache/hadoop/hbase/YouAreDeadException.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename 
{hbase-client => hbase-server}/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java (90%) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java similarity index 90% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java index 0c44b9a2cc42..6ba719a4acb1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java @@ -24,9 +24,9 @@ import org.apache.yetus.audience.InterfaceStability; /** - * This exception is thrown by the master when a region server reports and is - * already being processed as dead. This can happen when a region server loses - * its session but didn't figure it yet. + * This exception is thrown by the master when a region server reports and is already being + * processed as dead. This can happen when a region server loses its session but didn't figure it + * yet. */ @SuppressWarnings("serial") @InterfaceAudience.Private From ea8bee703483ecade4e8aa88c90a9bc31ec4cb4a Mon Sep 17 00:00:00 2001 From: Minji Kim Date: Thu, 29 Oct 2020 01:17:31 +0900 Subject: [PATCH 468/769] HBASE-25223 Use try-with-resources statement (#2592) Signed-off-by: Wei-Chiu Chuang Signed-off-by: Viraj Jasani Signed-off-by: Duo Zhang Signed-off-by: stack --- .../hadoop/hbase/snapshot/CreateSnapshot.java | 23 +++++----------- .../snapshot/SnapshotDescriptionUtils.java | 26 +++++-------------- .../hbase/snapshot/SnapshotManifest.java | 11 ++------ .../hbase/snapshot/SnapshotManifestV2.java | 17 ++++-------- 4 files changed, 19 insertions(+), 58 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java index d0fc80337f85..f8e54c9c459c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java @@ -67,24 +67,13 @@ protected void processOptions(CommandLine cmd) { @Override protected int doWork() throws Exception { - Connection connection = null; - Admin admin = null; - try { - connection = ConnectionFactory.createConnection(getConf()); - admin = connection.getAdmin(); - admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); + try (Connection connection = ConnectionFactory.createConnection(getConf()); + Admin admin = connection.getAdmin()) { + admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); } catch (Exception e) { - System.err.println("failed to take the snapshot: " + e.getMessage()); - return -1; - } finally { - if (admin != null) { - admin.close(); - } - if (connection != null) { - connection.close(); - } + System.err.println("failed to take the snapshot: " + e.getMessage()); + return -1; } - return 0; + return 0; } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index b54eab1372a3..c059792ca68e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -343,13 +343,8 @@ 
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingD FsPermission perms = CommonFSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY); Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); - try { - FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true); - try { - snapshot.writeTo(out); - } finally { - out.close(); - } + try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)){ + snapshot.writeTo(out); } catch (IOException e) { // if we get an exception, try to remove the snapshot info if (!fs.delete(snapshotInfo, false)) { @@ -370,15 +365,8 @@ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingD public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir) throws CorruptedSnapshotException { Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE); - try { - FSDataInputStream in = null; - try { - in = fs.open(snapshotInfo); - SnapshotDescription desc = SnapshotDescription.parseFrom(in); - return desc; - } finally { - if (in != null) in.close(); - } + try (FSDataInputStream in = fs.open(snapshotInfo)){ + return SnapshotDescription.parseFrom(in); } catch (IOException e) { throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e); } @@ -434,10 +422,8 @@ public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDes } public static boolean isSecurityAvailable(Configuration conf) throws IOException { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - try (Admin admin = conn.getAdmin()) { - return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); - } + try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { + return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 61bf192eb894..9df33e131327 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -582,11 +582,8 @@ private void convertToV2SingleManifest() throws IOException { */ private void writeDataManifest(final SnapshotDataManifest manifest) throws IOException { - FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME)); - try { + try (FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { manifest.writeTo(stream); - } finally { - stream.close(); } } @@ -594,9 +591,7 @@ private void writeDataManifest(final SnapshotDataManifest manifest) * Read the SnapshotDataManifest file */ private SnapshotDataManifest readDataManifest() throws IOException { - FSDataInputStream in = null; - try { - in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME)); + try (FSDataInputStream in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME))) { CodedInputStream cin = CodedInputStream.newInstance(in); cin.setSizeLimit(manifestSizeLimit); return SnapshotDataManifest.parseFrom(cin); @@ -604,8 +599,6 @@ private SnapshotDataManifest readDataManifest() throws IOException { return null; } catch (InvalidProtocolBufferException e) { throw new CorruptedSnapshotException("unable to parse data manifest " + e.getMessage(), e); - } finally { - if (in != null) in.close(); } } 
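The same conversion repeats in SnapshotManifestV2 below. For reference, a minimal before/after sketch of the try-with-resources pattern this patch applies — the readFirstByte helper here is hypothetical, not code from this patch:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class TryWithResourcesSketch {
  // Before: the caller closes the stream by hand and must guard against null.
  static int readFirstByteOld(FileSystem fs, Path path) throws IOException {
    FSDataInputStream in = null;
    try {
      in = fs.open(path);
      return in.read();
    } finally {
      if (in != null) in.close();
    }
  }

  // After: the resource declared in the try header is closed automatically,
  // on both the normal and the exception path.
  static int readFirstByteNew(FileSystem fs, Path path) throws IOException {
    try (FSDataInputStream in = fs.open(path)) {
      return in.read();
    }
  }
}
```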
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index 4f3df2fddc90..ae914f69b5cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -93,12 +93,9 @@ public void regionClose(final SnapshotRegionManifest.Builder region) throws IOEx FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf); if (workingDirFs.exists(snapshotDir)) { SnapshotRegionManifest manifest = region.build(); - FSDataOutputStream stream = workingDirFs.create( - getRegionManifestPath(snapshotDir, manifest)); - try { + try (FSDataOutputStream stream = workingDirFs.create( + getRegionManifestPath(snapshotDir, manifest))) { manifest.writeTo(stream); - } finally { - stream.close(); } } else { LOG.warn("can't write manifest without parent dir, maybe it has been deleted by master?"); @@ -157,14 +154,10 @@ public boolean accept(Path path) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { - FSDataInputStream stream = fs.open(st.getPath()); - CodedInputStream cin = CodedInputStream.newInstance(stream); - cin.setSizeLimit(manifestSizeLimit); - - try { + try (FSDataInputStream stream = fs.open(st.getPath())) { + CodedInputStream cin = CodedInputStream.newInstance(stream); + cin.setSizeLimit(manifestSizeLimit); return SnapshotRegionManifest.parseFrom(cin); - } finally { - stream.close(); } } }); From f09764d21258dab8435a215521fd58ac2bc97a2d Mon Sep 17 00:00:00 2001 From: Peter Somogyi Date: Wed, 28 Oct 2020 18:08:05 +0100 Subject: [PATCH 469/769] HBASE-25224 Maximize sleep for checking meta and namespace regions availability (#2593) Signed-off-by: Michael Stack --- .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 86d3983c5677..58a805334f36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1253,7 +1253,7 @@ private boolean isRegionOnline(RegionInfo ri) { ri.getRegionNameAsString(), rs, optProc.isPresent()); // Check once-a-minute. if (rc == null) { - rc = new RetryCounterFactory(1000).create(); + rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create(); } Threads.sleep(rc.getBackoffTimeAndIncrementAttempts()); } From ba14e61bf84e4ac54ed6e8648b020ee2754b0543 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 29 Oct 2020 09:21:18 -0700 Subject: [PATCH 470/769] HBASE-24845 Git/Jira Release Audit: limit branches when building audit db (#2238) Populating the audit database with release tag information from git is time consuming. Until that's sorted out, give the user a flag for limiting which branches they want reviewed.
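As an illustration (not part of this patch), a run that builds the release-tag database for only the branch-2 release lines might look like the following; the flag names are taken from the README changes below, while the two fix-version values are placeholders:

```shell script
# Hypothetical invocation; the fix-version values are placeholders.
git_jira_release_audit.py \
  --populate-from-git=true \
  --parse-release-tags=true \
  --branch-filter-regexp='branch-2.*' \
  --branch-1-fix-version=1.99.0 \
  --branch-2-fix-version=2.99.0
```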
Signed-off-by: Andrew Purtell --- dev-support/git-jira-release-audit/README.md | 9 +++++++-- .../git-jira-release-audit/git_jira_release_audit.py | 11 ++++++++++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/dev-support/git-jira-release-audit/README.md b/dev-support/git-jira-release-audit/README.md index 396128ad55df..6ea575e16fd3 100644 --- a/dev-support/git-jira-release-audit/README.md +++ b/dev-support/git-jira-release-audit/README.md @@ -62,6 +62,7 @@ usage: git_jira_release_audit.py [-h] [--populate-from-git POPULATE_FROM_GIT] [--release-line-regexp RELEASE_LINE_REGEXP] [--parse-release-tags PARSE_RELEASE_TAGS] [--fallback-actions-path FALLBACK_ACTIONS_PATH] + [--branch-filter-regexp BRANCH_FILTER_REGEXP] [--jira-url JIRA_URL] --branch-1-fix-version BRANCH_1_FIX_VERSION --branch-2-fix-version BRANCH_2_FIX_VERSION @@ -119,6 +120,9 @@ Interactions with the Git repo: --fallback-actions-path FALLBACK_ACTIONS_PATH Path to a file containing _DB.Actions applicable to specific git shas. (default: fallback_actions.csv) + --branch-filter-regexp BRANCH_FILTER_REGEXP + Limit repo parsing to branch names that match this + filter expression. (default: .*) --branch-1-fix-version BRANCH_1_FIX_VERSION The Jira fixVersion used to indicate an issue is committed to the specified release line branch @@ -175,8 +179,9 @@ fetch from Jira 100%|███████████████████ Optionally, the database can be build to include release tags, by specifying `--parse-release-tags=true`. This is more time-consuming, but is necessary for -auditing discrepancies between git and Jira. Running the same command but -including this flag looks like this: +auditing discrepancies between git and Jira. Optionally, limit the branches +under consideration by specifying a regex filter with `--branch-filter-regexp`. +Running the same command but including this flag looks like this: ```shell script origin/branch-1 100%|███████████████████████████████████████| 4084/4084 [08:58<00:00, 7.59 commit/s] diff --git a/dev-support/git-jira-release-audit/git_jira_release_audit.py b/dev-support/git-jira-release-audit/git_jira_release_audit.py index db2788d081d0..358dfd533502 100644 --- a/dev-support/git-jira-release-audit/git_jira_release_audit.py +++ b/dev-support/git-jira-release-audit/git_jira_release_audit.py @@ -199,13 +199,14 @@ class _RepoReader: _identify_amend_jira_id_pattern = re.compile(r'^amend (.+)', re.IGNORECASE) def __init__(self, db, fallback_actions_path, remote_name, development_branch, - release_line_regexp, parse_release_tags, **_kwargs): + release_line_regexp, branch_filter_regexp, parse_release_tags, **_kwargs): self._db = db self._repo = _RepoReader._open_repo() self._fallback_actions = _RepoReader._load_fallback_actions(fallback_actions_path) self._remote_name = remote_name self._development_branch = development_branch self._release_line_regexp = release_line_regexp + self._branch_filter_regexp = branch_filter_regexp self._parse_release_tags = parse_release_tags @property @@ -364,6 +365,10 @@ def populate_db_release_branch(self, origin_commit, release_branch): release_branch (str): The name of the ref whose history is to be parsed. 
""" global MANAGER + branch_filter_pattern = re.compile('%s/%s' % (self._remote_name, self._branch_filter_regexp)) + if not branch_filter_pattern.match(release_branch): + return + commits = list(self._repo.iter_commits( "%s...%s" % (origin_commit.hexsha, release_branch), reverse=True)) LOG.info("%s has %d commits since its origin at %s.", release_branch, len(commits), @@ -638,6 +643,10 @@ def _build_first_pass_parser(): '--fallback-actions-path', help='Path to a file containing _DB.Actions applicable to specific git shas.', default='fallback_actions.csv') + git_repo_group.add_argument( + '--branch-filter-regexp', + help='Limit repo parsing to branch names that match this filter expression.', + default=r'.*') jira_group = parser.add_argument_group('Interactions with Jira') jira_group.add_argument( '--jira-url', From b0df3793599c675e236cb4fcd958929a5b8bf77b Mon Sep 17 00:00:00 2001 From: GeorryHuang <215175212@qq.com> Date: Fri, 30 Oct 2020 04:09:18 +0800 Subject: [PATCH 471/769] HBASE-25090 CompactionConfiguration logs unrealistic store file sizes (#2595) Signed-off-by: stack --- .../regionserver/compactions/CompactionConfiguration.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java index dbc5b1fea1b1..75966b9e7467 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java @@ -180,7 +180,9 @@ public class CompactionConfiguration { @Override public String toString() { return String.format( - "size [%s, %s, %s); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;" + "size [minCompactSize:%s, maxCompactSize:%s, offPeakMaxCompactSize:%s);" + + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" + + " ratio %f; off-peak ratio %f; throttle point %d;" + " major period %d, major jitter %f, min locality to compact %f;" + " tiered compaction: max_age %d, incoming window min %d," + " compaction policy for tiered window %s, single output for minor %b," From 9e03808dfbebde49e990308f9d0b296439bb8237 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Wed, 30 Sep 2020 16:48:01 -0700 Subject: [PATCH 472/769] HBASE-24419 Normalizer merge plans should consider more than 2 regions when possible The core change here is to the loop in `SimpleRegionNormalizer#computeMergeNormalizationPlans`. It's a nested loop that walks the table's region chain once, looking for contiguous sequences of regions that meet the criteria for merge. The outer loop tracks the starting point of the next sequence, the inner loop looks for the end of that sequence. A single sequence becomes an instance of `MergeNormalizationPlan`. 
Signed-off-by: Huaxiang Sun --- .../apache/hadoop/hbase/MatcherPredicate.java | 65 +++++++ .../normalizer/MergeNormalizationPlan.java | 6 + .../normalizer/NormalizationTarget.java | 3 +- .../normalizer/SimpleRegionNormalizer.java | 82 ++++++--- .../TestSimpleRegionNormalizer.java | 64 ++++++- .../TestSimpleRegionNormalizerOnCluster.java | 167 +++++++++++------- 6 files changed, 287 insertions(+), 100 deletions(-) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java new file mode 100644 index 000000000000..695c026992ac --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.function.Supplier; +import org.apache.yetus.audience.InterfaceAudience; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.StringDescription; + +/** + * An implementation of {@link Waiter.ExplainingPredicate} that uses Hamcrest {@link Matcher} for + * both predicate evaluation and explanation. + * + * @param The type of value to be evaluated via {@link Matcher}. 
+ */ +@InterfaceAudience.Private +public class MatcherPredicate implements Waiter.ExplainingPredicate { + + private final String reason; + private final Supplier supplier; + private final Matcher matcher; + private T currentValue; + + public MatcherPredicate(final Supplier supplier, final Matcher matcher) { + this("", supplier, matcher); + } + + public MatcherPredicate(final String reason, final Supplier supplier, + final Matcher matcher) { + this.reason = reason; + this.supplier = supplier; + this.matcher = matcher; + this.currentValue = null; + } + + @Override public boolean evaluate() { + currentValue = supplier.get(); + return matcher.matches(currentValue); + } + + @Override public String explainFailure() { + final Description description = new StringDescription() + .appendText(reason) + .appendText("\nExpected: ").appendDescriptionOf(matcher) + .appendText("\n but: "); + matcher.describeMismatch(currentValue, description); + return description.toString(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java index 677b9ec8052e..f5a72863fe8a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java @@ -96,6 +96,12 @@ static class Builder { private final List normalizationTargets = new LinkedList<>(); + public Builder setTargets(final List targets) { + normalizationTargets.clear(); + normalizationTargets.addAll(targets); + return this; + } + public Builder addTarget(final RegionInfo regionInfo, final long regionSizeMb) { normalizationTargets.add(new NormalizationTarget(regionInfo, regionSizeMb)); return this; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java index 9e4b3f426403..95490288cef9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import java.util.Objects; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; @@ -33,7 +34,7 @@ class NormalizationTarget { private final long regionSizeMb; NormalizationTarget(final RegionInfo regionInfo, final long regionSizeMb) { - this.regionInfo = regionInfo; + this.regionInfo = Objects.requireNonNull(regionInfo); this.regionSizeMb = regionSizeMb; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index a641a0aa25b7..062e401ba812 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import static org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils.isEmpty; import java.io.IOException; import java.time.Instant; import java.time.Period; 
import java.util.ArrayList; import java.util.Collections; +import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.function.BooleanSupplier; @@ -41,7 +43,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** * Simple implementation of region normalizer. Logic in use: @@ -77,7 +78,7 @@ class SimpleRegionNormalizer implements RegionNormalizer { private boolean mergeEnabled; private int minRegionCount; private Period mergeMinRegionAge; - private int mergeMinRegionSizeMb; + private long mergeMinRegionSizeMb; public SimpleRegionNormalizer() { splitEnabled = DEFAULT_SPLIT_ENABLED; @@ -124,10 +125,10 @@ private static Period parseMergeMinRegionAge(final Configuration conf) { return Period.ofDays(settledValue); } - private static int parseMergeMinRegionSizeMb(final Configuration conf) { - final int parsedValue = - conf.getInt(MERGE_MIN_REGION_SIZE_MB_KEY, DEFAULT_MERGE_MIN_REGION_SIZE_MB); - final int settledValue = Math.max(0, parsedValue); + private static long parseMergeMinRegionSizeMb(final Configuration conf) { + final long parsedValue = + conf.getLong(MERGE_MIN_REGION_SIZE_MB_KEY, DEFAULT_MERGE_MIN_REGION_SIZE_MB); + final long settledValue = Math.max(0, parsedValue); if (parsedValue != settledValue) { warnInvalidValue(MERGE_MIN_REGION_SIZE_MB_KEY, parsedValue, settledValue); } @@ -171,7 +172,7 @@ public Period getMergeMinRegionAge() { /** * Return this instance's configured value for {@value #MERGE_MIN_REGION_SIZE_MB_KEY}. */ - public int getMergeMinRegionSizeMb() { + public long getMergeMinRegionSizeMb() { return mergeMinRegionSizeMb; } @@ -198,7 +199,7 @@ public List computePlansForTable(final TableName table) { } final NormalizeContext ctx = new NormalizeContext(table); - if (CollectionUtils.isEmpty(ctx.getTableRegions())) { + if (isEmpty(ctx.getTableRegions())) { return Collections.emptyList(); } @@ -251,7 +252,7 @@ private boolean proceedWithMergePlanning() { * Also make sure tableRegions contains regions of the same table */ private double getAverageRegionSizeMb(final List tableRegions) { - if (CollectionUtils.isEmpty(tableRegions)) { + if (isEmpty(tableRegions)) { throw new IllegalStateException( "Cannot calculate average size of a table without any regions."); } @@ -315,35 +316,60 @@ private boolean skipForMerge(final RegionStates regionStates, final RegionInfo r * towards target average or target region count. */ private List computeMergeNormalizationPlans(final NormalizeContext ctx) { - if (ctx.getTableRegions().size() < minRegionCount) { + if (isEmpty(ctx.getTableRegions()) || ctx.getTableRegions().size() < minRegionCount) { LOG.debug("Table {} has {} regions, required min number of regions for normalizer to run" + " is {}, not computing merge plans.", ctx.getTableName(), ctx.getTableRegions().size(), minRegionCount); return Collections.emptyList(); } - final double avgRegionSizeMb = ctx.getAverageRegionSizeMb(); + final long avgRegionSizeMb = (long) ctx.getAverageRegionSizeMb(); + if (avgRegionSizeMb < mergeMinRegionSizeMb) { + return Collections.emptyList(); + } LOG.debug("Computing normalization plan for table {}. 
average region size: {}, number of" + " regions: {}.", ctx.getTableName(), avgRegionSizeMb, ctx.getTableRegions().size()); - final List plans = new ArrayList<>(); - for (int candidateIdx = 0; candidateIdx < ctx.getTableRegions().size() - 1; candidateIdx++) { - final RegionInfo current = ctx.getTableRegions().get(candidateIdx); - final RegionInfo next = ctx.getTableRegions().get(candidateIdx + 1); - if (skipForMerge(ctx.getRegionStates(), current) - || skipForMerge(ctx.getRegionStates(), next)) { - continue; + // this nested loop walks the table's region chain once, looking for contiguous sequences of + // regions that meet the criteria for merge. The outer loop tracks the starting point of the + // next sequence, the inner loop looks for the end of that sequence. A single sequence becomes + // an instance of MergeNormalizationPlan. + + final List plans = new LinkedList<>(); + final List rangeMembers = new LinkedList<>(); + long sumRangeMembersSizeMb; + int current = 0; + for (int rangeStart = 0; + rangeStart < ctx.getTableRegions().size() - 1 && current < ctx.getTableRegions().size();) { + // walk the region chain looking for contiguous sequences of regions that can be merged. + rangeMembers.clear(); + sumRangeMembersSizeMb = 0; + for (current = rangeStart; current < ctx.getTableRegions().size(); current++) { + final RegionInfo regionInfo = ctx.getTableRegions().get(current); + final long regionSizeMb = getRegionSizeMB(regionInfo); + if (skipForMerge(ctx.getRegionStates(), regionInfo)) { + // this region cannot participate in a range. resume the outer loop. + rangeStart = Math.max(current, rangeStart + 1); + break; + } + if (rangeMembers.isEmpty() // when there are no range members, seed the range with whatever + // we have. this way we're prepared in case the next region is + // 0-size. + || regionSizeMb == 0 // always add an empty region to the current range. + || (regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb)) { // add the current region + // to the range when + // there's capacity + // remaining. + rangeMembers.add(new NormalizationTarget(regionInfo, regionSizeMb)); + sumRangeMembersSizeMb += regionSizeMb; + continue; + } + // we have accumulated enough regions to fill a range. resume the outer loop. + rangeStart = Math.max(current, rangeStart + 1); + break; } - final long currentSizeMb = getRegionSizeMB(current); - final long nextSizeMb = getRegionSizeMB(next); - // always merge away empty regions when they present themselves. 
- if (currentSizeMb == 0 || nextSizeMb == 0 || currentSizeMb + nextSizeMb < avgRegionSizeMb) { - final MergeNormalizationPlan plan = new MergeNormalizationPlan.Builder() - .addTarget(current, currentSizeMb) - .addTarget(next, nextSizeMb) - .build(); - plans.add(plan); - candidateIdx++; + if (rangeMembers.size() > 1) { + plans.add(new MergeNormalizationPlan.Builder().setTargets(rangeMembers).build()); } } return plans; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index f263cbc4fdfd..33b32972542e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -33,6 +33,7 @@ import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.RETURNS_DEEP_STUBS; import static org.mockito.Mockito.when; @@ -225,7 +226,7 @@ public void testSplitOfLargeRegion() { } @Test - public void testSplitWithTargetRegionSize() throws Exception { + public void testWithTargetRegionSize() throws Exception { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 6); final Map regionSizes = @@ -251,8 +252,6 @@ public void testSplitWithTargetRegionSize() throws Exception { new MergeNormalizationPlan.Builder() .addTarget(regionInfos.get(0), 20) .addTarget(regionInfos.get(1), 40) - .build(), - new MergeNormalizationPlan.Builder() .addTarget(regionInfos.get(2), 60) .addTarget(regionInfos.get(3), 80) .build())); @@ -392,7 +391,7 @@ public void testHonorsMergeMinRegionSize() { } @Test - public void testMergeEmptyRegions() { + public void testMergeEmptyRegions0() { conf.setBoolean(SPLIT_ENABLED_KEY, false); conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); final TableName tableName = name.getTableName(); @@ -418,6 +417,63 @@ public void testMergeEmptyRegions() { .build())); } + @Test + public void testMergeEmptyRegions1() { + conf.setBoolean(SPLIT_ENABLED_KEY, false); + conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); + final TableName tableName = name.getTableName(); + final List regionInfos = createRegionInfos(tableName, 8); + final Map regionSizes = + createRegionSizesMap(regionInfos, 0, 1, 10, 0, 9, 0, 10, 0); + setupMocksForNormalizer(regionSizes, regionInfos); + + assertFalse(normalizer.isSplitEnabled()); + assertEquals(0, normalizer.getMergeMinRegionSizeMb()); + assertThat(normalizer.computePlansForTable(tableName), contains( + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 1) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(2), 10) + .addTarget(regionInfos.get(3), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(4), 9) + .addTarget(regionInfos.get(5), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(6), 10) + .addTarget(regionInfos.get(7), 0) + .build())); + } + + @Test + public void testSplitAndMultiMerge() { + conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); + final TableName tableName = name.getTableName(); + final List regionInfos = createRegionInfos(tableName, 8); + final Map regionSizes = + 
createRegionSizesMap(regionInfos, 3, 1, 1, 30, 9, 3, 1, 0); + setupMocksForNormalizer(regionSizes, regionInfos); + + assertTrue(normalizer.isMergeEnabled()); + assertTrue(normalizer.isSplitEnabled()); + assertEquals(0, normalizer.getMergeMinRegionSizeMb()); + assertThat(normalizer.computePlansForTable(tableName), contains( + new SplitNormalizationPlan(regionInfos.get(3), 30), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 3) + .addTarget(regionInfos.get(1), 1) + .addTarget(regionInfos.get(2), 1) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(5), 3) + .addTarget(regionInfos.get(6), 1) + .addTarget(regionInfos.get(7), 0) + .build())); + } + // This test is to make sure that normalizer is only going to merge adjacent regions. @Test public void testNormalizerCannotMergeNonAdjacentRegions() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java index f5feb59ca329..3cc9168b4343 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java @@ -17,11 +17,16 @@ */ package org.apache.hadoop.hbase.master.normalizer; +import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -29,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MatcherPredicate; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ServerName; @@ -55,6 +61,8 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.LoadTestKVGenerator; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -144,7 +152,7 @@ public void testHonorsNormalizerTableSetting() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); - waitForTableSplit(tn1, tn1RegionCount + 1); + waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1)); // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because normalizer has not been enabled on it. 
@@ -161,7 +169,7 @@ public void testHonorsNormalizerTableSetting() throws Exception { tn2RegionCount, getRegionCount(tn2)); LOG.debug("waiting for t3 to settle..."); - waitForTableRegionCount(tn3, tn3RegionCount); + waitForTableRegionCount(tn3, comparesEqualTo(tn3RegionCount)); } finally { dropIfExists(tn1); dropIfExists(tn2); @@ -198,7 +206,7 @@ void testRegionNormalizationSplit(boolean limitedByQuota) throws Exception { currentRegionCount, getRegionCount(tableName)); } else { - waitForTableSplit(tableName, currentRegionCount + 1); + waitForTableRegionCount(tableName, greaterThanOrEqualTo(currentRegionCount + 1)); assertEquals( tableName + " should have split.", currentRegionCount + 1, @@ -216,7 +224,7 @@ public void testRegionNormalizationMerge() throws Exception { final int currentRegionCount = createTableBegsMerge(tableName); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); - waitForTableMerge(tableName, currentRegionCount - 1); + waitForTableRegionCount(tableName, lessThanOrEqualTo(currentRegionCount - 1)); assertEquals( tableName + " should have merged.", currentRegionCount - 1, @@ -242,7 +250,7 @@ public void testHonorsNamespaceFilter() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); - waitForTableSplit(tn1, tn1RegionCount + 1); + waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1)); // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because it's not a member of the target namespace. @@ -250,7 +258,7 @@ public void testHonorsNamespaceFilter() throws Exception { tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); - waitForTableRegionCount(tn2, tn2RegionCount); + waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); dropIfExists(tn2); @@ -271,7 +279,7 @@ public void testHonorsPatternFilter() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); - waitForTableSplit(tn1, tn1RegionCount + 1); + waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1)); // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because it fails filter. @@ -279,7 +287,7 @@ public void testHonorsPatternFilter() throws Exception { tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); - waitForTableRegionCount(tn2, tn2RegionCount); + waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); dropIfExists(tn2); @@ -300,7 +308,7 @@ public void testHonorsNameFilter() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); - waitForTableSplit(tn1, tn1RegionCount + 1); + waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1)); // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn3RegionCount number of regions because it fails filter: @@ -308,13 +316,33 @@ public void testHonorsNameFilter() throws Exception { tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); - waitForTableRegionCount(tn2, tn2RegionCount); + waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); dropIfExists(tn2); } } + /** + * A test for when a region is the target of both a split and a merge plan. Does not define + * expected behavior, only that some change is applied to the table. 
+ */ + @Test + public void testTargetOfSplitAndMerge() throws Exception { + final TableName tn = TableName.valueOf(name.getMethodName()); + try { + final int tnRegionCount = createTableTargetOfSplitAndMerge(tn); + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize().get()); + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( + "expected " + tn + " to split or merge (probably split)", + () -> getRegionCountUnchecked(tn), + not(comparesEqualTo(tnRegionCount)))); + } finally { + dropIfExists(tn); + } + } + private static TableName buildTableNameForQuotaTest(final String methodName) throws Exception { String nsp = "np2"; NamespaceDescriptor nspDesc = @@ -326,74 +354,30 @@ private static TableName buildTableNameForQuotaTest(final String methodName) thr } private static void waitForSkippedSplits(final HMaster master, - final long existingSkippedSplitCount) throws Exception { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override public String explainFailure() { - return "waiting to observe split attempt and skipped."; - } - @Override public boolean evaluate() { - final long skippedSplitCount = master.getRegionNormalizerManager() - .getSkippedCount(PlanType.SPLIT); - return skippedSplitCount > existingSkippedSplitCount; - } - }); + final long existingSkippedSplitCount) { + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( + "waiting to observe split attempt and skipped.", + () -> master.getRegionNormalizerManager().getSkippedCount(PlanType.SPLIT), + Matchers.greaterThan(existingSkippedSplitCount))); } private static void waitForTableRegionCount(final TableName tableName, - final int targetRegionCount) throws IOException { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override - public String explainFailure() { - return "expected " + targetRegionCount + " number of regions for table " + tableName; - } - - @Override - public boolean evaluate() throws IOException { - final int currentRegionCount = getRegionCount(tableName); - return currentRegionCount == targetRegionCount; - } - }); - } - - private static void waitForTableSplit(final TableName tableName, final int targetRegionCount) - throws IOException { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override - public String explainFailure() { - return "expected normalizer to split region."; - } - - @Override - public boolean evaluate() throws IOException { - final int currentRegionCount = getRegionCount(tableName); - return currentRegionCount >= targetRegionCount; - } - }); - } - - private static void waitForTableMerge(final TableName tableName, final int targetRegionCount) - throws IOException { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override - public String explainFailure() { - return "expected normalizer to merge regions."; - } - - @Override - public boolean evaluate() throws IOException { - final int currentRegionCount = getRegionCount(tableName); - return currentRegionCount <= targetRegionCount; - } - }); + Matcher matcher) { + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( + "region count for table " + tableName + " does not match expected", + () -> getRegionCountUnchecked(tableName), + matcher)); } private static List generateTestData(final TableName tableName, final int... 
regionSizesMb) throws IOException { final List generatedRegions; final int numRegions = regionSizesMb.length; + LOG.debug("generating test data into {}, {} regions of sizes (mb) {}", tableName, numRegions, + regionSizesMb); try (Table ignored = TEST_UTIL.createMultiRegionTable(tableName, FAMILY_NAME, numRegions)) { // Need to get sorted list of regions here - generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName); + generatedRegions = new ArrayList<>(TEST_UTIL.getHBaseCluster().getRegions(tableName)); generatedRegions.sort(Comparator.comparing(HRegion::getRegionInfo, RegionInfo.COMPARATOR)); assertEquals(numRegions, generatedRegions.size()); for (int i = 0; i < numRegions; i++) { @@ -407,6 +391,7 @@ private static List generateTestData(final TableName tableName, private static void generateTestData(Region region, int numRows) throws IOException { // generating 1Mb values + LOG.debug("writing {}mb to {}", numRows, region); LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(1024 * 1024, 1024 * 1024); for (int i = 0; i < numRows; ++i) { byte[] key = Bytes.add(region.getRegionInfo().getStartKey(), Bytes.toBytes(i)); @@ -513,6 +498,46 @@ private static int createTableBegsMerge(final TableName tableName) throws Except return 5; } + /** + * Create a table with 4 regions, having region sizes so as to provoke a split of the largest + * region and a merge of an empty region into the largest. + *

+ * <ul>
+ * <li>total table size: 14</li>
+ * <li>average region size: 3.5</li>
+ * </ul>
    + */ + private static int createTableTargetOfSplitAndMerge(final TableName tableName) throws Exception { + final int[] regionSizesMb = { 10, 0, 2, 2 }; + final List generatedRegions = generateTestData(tableName, regionSizesMb); + assertEquals(4, getRegionCount(tableName)); + admin.flush(tableName).get(); + + final TableDescriptor td = TableDescriptorBuilder + .newBuilder(admin.getDescriptor(tableName).get()) + .setNormalizationEnabled(true) + .build(); + admin.modifyTable(td).get(); + + // make sure relatively accurate region statistics are available for the test table. use + // the last/largest region as clue. + LOG.debug("waiting for region statistics to settle."); + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { + @Override public String explainFailure() { + return "expected largest region to be >= 10mb."; + } + @Override public boolean evaluate() { + for (int i = 0; i < generatedRegions.size(); i++) { + final RegionInfo regionInfo = generatedRegions.get(i).getRegionInfo(); + if (!(getRegionSizeMB(master, regionInfo) >= regionSizesMb[i])) { + return false; + } + } + return true; + } + }); + return 4; + } + private static void dropIfExists(final TableName tableName) throws Exception { if (tableName != null && admin.tableExists(tableName).get()) { if (admin.isTableEnabled(tableName).get()) { @@ -527,4 +552,12 @@ private static int getRegionCount(TableName tableName) throws IOException { return locator.getAllRegionLocations().size(); } } + + private static int getRegionCountUnchecked(final TableName tableName) { + try { + return getRegionCount(tableName); + } catch (IOException e) { + throw new RuntimeException(e); + } + } } From 3a484d2ed77f396146acaa454e10931e49327c90 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 16 Apr 2020 09:21:52 -0700 Subject: [PATCH 473/769] HBASE-24200 Upgrade to Yetus 0.12.0 Signed-off-by: Sean Busbey Signed-off-by: Duo Zhang --- dev-support/Jenkinsfile | 2 +- dev-support/Jenkinsfile_GitHub | 2 +- dev-support/create-release/do-release.sh | 4 ++-- dev-support/create-release/hbase-rm/Dockerfile | 2 +- dev-support/jenkins_precommit_jira_yetus.sh | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 9f23a58873cd..f3de8edffcbe 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -31,7 +31,7 @@ pipeline { disableConcurrentBuilds() } environment { - YETUS_RELEASE = '0.11.1' + YETUS_RELEASE = '0.12.0' // where we'll write everything from different steps. Need a copy here so the final step can check for success/failure. OUTPUT_DIR_RELATIVE_GENERAL = 'output-general' OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7' diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index a15ee9e84957..d25386717739 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -37,7 +37,7 @@ pipeline { DOCKERFILE_REL = "${SRC_REL}/dev-support/docker/Dockerfile" YETUS_DRIVER_REL = "${SRC_REL}/dev-support/jenkins_precommit_github_yetus.sh" // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION = 'rel/0.11.1' + YETUS_VERSION = 'rel/0.12.0' GENERAL_CHECK_PLUGINS = 'all,-compile,-javac,-javadoc,-jira,-shadedjars,-unit' JDK_SPECIFIC_PLUGINS = 'compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars,unit' // output from surefire; sadly the archive function in yetus only works on file names. 
diff --git a/dev-support/create-release/do-release.sh b/dev-support/create-release/do-release.sh index ebab9335cc27..9500801c247b 100755 --- a/dev-support/create-release/do-release.sh +++ b/dev-support/create-release/do-release.sh @@ -118,9 +118,9 @@ function should_build { if should_build "tag" && [ "$SKIP_TAG" = 0 ]; then if [ -z "${YETUS_HOME}" ] && [ "${RUNNING_IN_DOCKER}" != "1" ]; then - declare local_yetus="/opt/apache-yetus/0.11.1/" + declare local_yetus="/opt/apache-yetus/0.12.0/" if [ "$(get_host_os)" = "DARWIN" ]; then - local_yetus="/usr/local/Cellar/yetus/0.11.1/" + local_yetus="/usr/local/Cellar/yetus/0.12.0/" fi YETUS_HOME="$(read_config "YETUS_HOME not defined. Absolute path to local install of Apache Yetus" "${local_yetus}")" export YETUS_HOME diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile index 630b8f17332e..2c29974cfbba 100644 --- a/dev-support/create-release/hbase-rm/Dockerfile +++ b/dev-support/create-release/hbase-rm/Dockerfile @@ -44,7 +44,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && pip install \ python-dateutil==2.8.1 # Install Apache Yetus -ENV YETUS_VERSION 0.11.1 +ENV YETUS_VERSION 0.12.0 SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN wget -qO- "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ tar xvz -C /opt diff --git a/dev-support/jenkins_precommit_jira_yetus.sh b/dev-support/jenkins_precommit_jira_yetus.sh index 9961c3c98cfc..d721ac8faba6 100755 --- a/dev-support/jenkins_precommit_jira_yetus.sh +++ b/dev-support/jenkins_precommit_jira_yetus.sh @@ -32,7 +32,7 @@ export MAVEN_HOME=/home/jenkins/tools/maven/apache-maven-3.0.5 #export PATH=$PATH:${JAVA_HOME}/bin:${MAVEN_HOME}/bin: export PATH=$PATH:${MAVEN_HOME}/bin: -YETUS_RELEASE=0.11.1 +YETUS_RELEASE=0.12.0 COMPONENT=${WORKSPACE}/component TEST_FRAMEWORK=${WORKSPACE}/test_framework From 8de25f627344c8a5130271b185ae5bcd2666ce2c Mon Sep 17 00:00:00 2001 From: Pankaj Date: Fri, 30 Oct 2020 05:04:23 +0530 Subject: [PATCH 474/769] HBASE-24977 Meta table shouldn't be modified as read only (#2537) Signed-off-by: stack --- .../hbase/util/TableDescriptorChecker.java | 6 ++++ .../hadoop/hbase/TestHBaseMetaEdit.java | 29 +++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index 906ae454d6a8..c69d38a8ec25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -150,6 +151,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) warnOrThrowExceptionForFailure(logWarn, message, null); } + // Meta table shouldn't be set as read only, otherwise it will impact region assignments + if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) { + warnOrThrowExceptionForFailure(false, "Meta table can't be set as 
read only.", null); + } + for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) { if (hcd.getTimeToLive() <= 0) { String message = "TTL for column family " + hcd.getNameAsString() + " must be positive."; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java index 6977452724d3..33c0f1041b00 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -28,6 +29,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -133,4 +135,31 @@ public void testEditMeta() throws IOException { assertTrue(hioe.getMessage().contains("Delete of hbase:meta")); } } + + /** + * Validate whether meta table can be altered as READ only, shouldn't be allowed otherwise it will + * break assignment functionalities. See HBASE-24977. + */ + @Test + public void testAlterMetaWithReadOnly() throws IOException { + Admin admin = UTIL.getAdmin(); + TableDescriptor origMetaTableDesc = admin.getDescriptor(TableName.META_TABLE_NAME); + assertFalse(origMetaTableDesc.isReadOnly()); + TableDescriptor newTD = + TableDescriptorBuilder.newBuilder(origMetaTableDesc).setReadOnly(true).build(); + try { + admin.modifyTable(newTD); + fail("Meta table can't be set as read only"); + } catch (Exception e) { + assertFalse(admin.getDescriptor(TableName.META_TABLE_NAME).isReadOnly()); + } + + // Create a table to check region assignment & meta operation + TableName tableName = TableName.valueOf("tempTable"); + TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName).setReadOnly(true) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()) + .build(); + UTIL.getAdmin().createTable(td); + UTIL.deleteTable(tableName); + } } From 440d960333625263801468d71e47265862125b63 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 30 Oct 2020 10:41:56 -0700 Subject: [PATCH 475/769] HBASE-25167 Normalizer support for hot config reloading (#2523) Wire up the `ConfigurationObserver` chain for `RegionNormalizerManager`. The following configuration keys support hot-reloading: * hbase.normalizer.throughput.max_bytes_per_sec * hbase.normalizer.split.enabled * hbase.normalizer.merge.enabled * hbase.normalizer.min.region.count * hbase.normalizer.merge.min_region_age.days * hbase.normalizer.merge.min_region_size.mb Note that support for `hbase.normalizer.period` is not provided here. Support would need to be implemented generally for the `Chore` subsystem. 
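As a concrete illustration of the observer mechanics, a component opts in by implementing ConfigurationObserver and re-reading its tunables in the callback. A hedged sketch — ThrottleHolder and its accessor are invented for illustration, while ConfigurationObserver, Configuration#getLongBytes, and the configuration key are the real APIs and key touched by this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;

class ThrottleHolder implements ConfigurationObserver {
  // volatile so reader threads observe the value published by the reload thread
  private volatile long maxBytesPerSec;

  ThrottleHolder(Configuration conf) {
    onConfigurationChange(conf);
  }

  @Override
  public void onConfigurationChange(Configuration conf) {
    // Re-read the tunable each time the manager fans out a new Configuration.
    this.maxBytesPerSec =
        conf.getLongBytes("hbase.normalizer.throughput.max_bytes_per_sec", Long.MAX_VALUE);
  }

  long maxBytesPerSec() {
    return maxBytesPerSec;
  }
}

The remaining wiring is registration with a ConfigurationManager, whose notifyAllObservers(newConf) drives the callback — exactly what this commit adds for the RegionNormalizerManager in HMaster below.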
Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani Signed-off-by: Aman Poonia --- .../hbase/conf/ConfigurationManager.java | 27 +-- .../hbase/conf/ConfigurationObserver.java | 4 +- .../hbase/conf/TestConfigurationManager.java | 11 +- .../apache/hadoop/hbase/master/HMaster.java | 1 + .../normalizer/RegionNormalizerManager.java | 26 ++- .../normalizer/RegionNormalizerWorker.java | 41 ++++- .../normalizer/SimpleRegionNormalizer.java | 159 ++++++++++++++---- ...ormalizerManagerConfigurationObserver.java | 110 ++++++++++++ 8 files changed, 319 insertions(+), 60 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java index 511679f5b547..2c36c5308fa3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,28 +21,29 @@ import java.util.Set; import java.util.WeakHashMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Maintains the set of all the classes which would like to get notified * when the Configuration is reloaded from the disk in the Online Configuration * Change mechanism, which lets you update certain configuration properties * on-the-fly, without having to restart the cluster. - * + *

 * If a class has configuration properties which you would like to be able to
 * change on-the-fly, do the following:
- * 1. Implement the {@link ConfigurationObserver} interface. This would require
+ * <ol>
+ * <li>Implement the {@link ConfigurationObserver} interface. This would require
 * you to implement the
 * {@link ConfigurationObserver#onConfigurationChange(Configuration)}
 * method. This is a callback that is used to notify your class' instance
 * that the configuration has changed. In this method, you need to check
 * if the new values for the properties that are of interest to your class
 * are different from the cached values. If yes, update them.
- *
+ * <br>
 * However, be careful with this. Certain properties might be trivially
 * mutable online, but others might not. Two properties might be trivially
 * mutable by themselves, but not when changed together. For example, if a
 * method uses properties "a" and "b", and the
@@ -51,21 +52,23 @@
 * yet updated "b", it might make a decision on the basis of a new value of
 * "a", and an old value of "b". This might introduce subtle bugs. This
 * needs to be dealt on a case-by-case basis, and this class does not provide
- * any protection from such cases.
+ * any protection from such cases.</li>
 *
- * 2. Register the appropriate instance of the class with the
+ * <li>Register the appropriate instance of the class with the
 * {@link ConfigurationManager} instance, using the
 * {@link ConfigurationManager#registerObserver(ConfigurationObserver)}
 * method. Be careful not to do this in the constructor, as you might cause
 * the 'this' reference to escape. Use a factory method, or an initialize()
- * method which is called after the construction of the object.
+ * method which is called after the construction of the object.</li>
 *
- * 3. Deregister the instance using the
+ * <li>Deregister the instance using the
 * {@link ConfigurationManager#deregisterObserver(ConfigurationObserver)}
 * method when it is going out of scope. In case you are not able to do that
 * for any reason, it is still okay, since entries for dead observers are
 * automatically collected during GC. But nonetheless, it is still a good
- * practice to deregister your observer, whenever possible.
+ * practice to deregister your observer, whenever possible.</li>
+ * </ol>

    */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -118,8 +121,8 @@ public void notifyAllObservers(Configuration conf) { observer.onConfigurationChange(conf); } } catch (Throwable t) { - LOG.error("Encountered a throwable while notifying observers: " + " of type : " + - observer.getClass().getCanonicalName() + "(" + observer + ")", t); + LOG.error("Encountered a throwable while notifying observers: of type : {}({})", + observer.getClass().getCanonicalName(), observer, t); } } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java index 2370a21af033..0d1d8ce5a783 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,7 @@ /** * Every class that wants to observe changes in Configuration properties, * must implement interface (and also, register itself with the - * ConfigurationManager object. + * {@link ConfigurationManager}. */ @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java index 20dd02442631..21d74806ba04 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -39,9 +38,9 @@ public class TestConfigurationManager { private static final Logger LOG = LoggerFactory.getLogger(TestConfigurationManager.class); - class DummyConfigurationObserver implements ConfigurationObserver { + static class DummyConfigurationObserver implements ConfigurationObserver { private boolean notifiedOnChange = false; - private ConfigurationManager cm; + private final ConfigurationManager cm; public DummyConfigurationObserver(ConfigurationManager cm) { this.cm = cm; @@ -63,11 +62,11 @@ public void resetNotifiedOnChange() { } public void register() { - this.cm.registerObserver(this); + cm.registerObserver(this); } public void deregister() { - this.cm.deregisterObserver(this); + cm.deregisterObserver(this); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 58a805334f36..f9123046eef2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -785,6 +785,7 @@ private void initializeZKBasedSystemTrackers() this.regionNormalizerManager = RegionNormalizerFactory.createNormalizerManager(conf, zooKeeper, this); + this.configurationManager.registerObserver(regionNormalizerManager); this.regionNormalizerManager.start(); this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java index e818519d6513..b4d16e796731 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerManager.java @@ -22,8 +22,11 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -35,7 +38,7 @@ * This class encapsulates the details of the {@link RegionNormalizer} subsystem. 
*/ @InterfaceAudience.Private -public class RegionNormalizerManager { +public class RegionNormalizerManager implements PropagatingConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerManager.class); private final RegionNormalizerTracker regionNormalizerTracker; @@ -48,7 +51,7 @@ public class RegionNormalizerManager { private boolean started = false; private boolean stopped = false; - public RegionNormalizerManager( + RegionNormalizerManager( @NonNull final RegionNormalizerTracker regionNormalizerTracker, @Nullable final RegionNormalizerChore regionNormalizerChore, @Nullable final RegionNormalizerWorkQueue workQueue, @@ -67,6 +70,25 @@ public RegionNormalizerManager( .build()); } + @Override + public void registerChildren(ConfigurationManager manager) { + if (worker != null) { + manager.registerObserver(worker); + } + } + + @Override + public void deregisterChildren(ConfigurationManager manager) { + if (worker != null) { + manager.deregisterObserver(worker); + } + } + + @Override + public void onConfigurationChange(Configuration conf) { + // no configuration managed here directly. + } + public void start() { synchronized (startStopLock) { if (started) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java index 30f9fc25364d..408317a31f87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizerWorker.java @@ -26,6 +26,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -39,7 +42,7 @@ * and executes the resulting {@link NormalizationPlan}s. 
*/ @InterfaceAudience.Private -class RegionNormalizerWorker implements Runnable { +class RegionNormalizerWorker implements PropagatingConfigurationObserver, Runnable { private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerWorker.class); static final String RATE_LIMIT_BYTES_PER_SEC_KEY = @@ -70,7 +73,32 @@ class RegionNormalizerWorker implements Runnable { this.rateLimiter = loadRateLimiter(configuration); } + @Override + public void registerChildren(ConfigurationManager manager) { + if (regionNormalizer instanceof ConfigurationObserver) { + final ConfigurationObserver observer = (ConfigurationObserver) regionNormalizer; + manager.registerObserver(observer); + } + } + + @Override + public void deregisterChildren(ConfigurationManager manager) { + if (regionNormalizer instanceof ConfigurationObserver) { + final ConfigurationObserver observer = (ConfigurationObserver) regionNormalizer; + manager.deregisterObserver(observer); + } + } + + @Override + public void onConfigurationChange(Configuration conf) { + rateLimiter.setRate(loadRateLimit(conf)); + } + private static RateLimiter loadRateLimiter(final Configuration configuration) { + return RateLimiter.create(loadRateLimit(configuration)); + } + + private static long loadRateLimit(final Configuration configuration) { long rateLimitBytes = configuration.getLongBytes(RATE_LIMIT_BYTES_PER_SEC_KEY, RATE_UNLIMITED_BYTES); long rateLimitMbs = rateLimitBytes / 1_000_000L; @@ -82,7 +110,7 @@ private static RateLimiter loadRateLimiter(final Configuration configuration) { } LOG.info("Normalizer rate limit set to {}", rateLimitBytes == RATE_UNLIMITED_BYTES ? "unlimited" : rateLimitMbs + " MB/sec"); - return RateLimiter.create(rateLimitMbs); + return rateLimitMbs; } /** @@ -116,6 +144,15 @@ long getMergePlanCount() { return mergePlanCount; } + /** + * Used in test only. This field is exposed to the test, as opposed to tracking the current + * configuration value beside the RateLimiter instance and managing synchronization to keep the + * two in sync. 
+ */ + RateLimiter getRateLimiter() { + return rateLimiter; + } + @Override public void run() { while (true) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 062e401ba812..6d7387b7f11b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.assignment.RegionStates; @@ -56,7 +57,7 @@ * */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -class SimpleRegionNormalizer implements RegionNormalizer { +class SimpleRegionNormalizer implements RegionNormalizer, ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class); static final String SPLIT_ENABLED_KEY = "hbase.normalizer.split.enabled"; @@ -72,25 +73,17 @@ class SimpleRegionNormalizer implements RegionNormalizer { static final String MERGE_MIN_REGION_SIZE_MB_KEY = "hbase.normalizer.merge.min_region_size.mb"; static final int DEFAULT_MERGE_MIN_REGION_SIZE_MB = 1; - private Configuration conf; private MasterServices masterServices; - private boolean splitEnabled; - private boolean mergeEnabled; - private int minRegionCount; - private Period mergeMinRegionAge; - private long mergeMinRegionSizeMb; + private NormalizerConfiguration normalizerConfiguration; public SimpleRegionNormalizer() { - splitEnabled = DEFAULT_SPLIT_ENABLED; - mergeEnabled = DEFAULT_MERGE_ENABLED; - minRegionCount = DEFAULT_MIN_REGION_COUNT; - mergeMinRegionAge = Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS); - mergeMinRegionSizeMb = DEFAULT_MERGE_MIN_REGION_SIZE_MB; + masterServices = null; + normalizerConfiguration = new NormalizerConfiguration(); } @Override public Configuration getConf() { - return conf; + return normalizerConfiguration.getConf(); } @Override @@ -98,12 +91,13 @@ public void setConf(final Configuration conf) { if (conf == null) { return; } - this.conf = conf; - splitEnabled = conf.getBoolean(SPLIT_ENABLED_KEY, DEFAULT_SPLIT_ENABLED); - mergeEnabled = conf.getBoolean(MERGE_ENABLED_KEY, DEFAULT_MERGE_ENABLED); - minRegionCount = parseMinRegionCount(conf); - mergeMinRegionAge = parseMergeMinRegionAge(conf); - mergeMinRegionSizeMb = parseMergeMinRegionSizeMb(conf); + normalizerConfiguration = new NormalizerConfiguration(conf, normalizerConfiguration); + } + + @Override + public void onConfigurationChange(Configuration conf) { + LOG.debug("Updating configuration parameters according to new configuration instance."); + setConf(conf); } private static int parseMinRegionCount(final Configuration conf) { @@ -141,39 +135,46 @@ private static void warnInvalidValue(final String key, final T parsedValue, key, parsedValue, settledValue); } + private static void logConfigurationUpdated(final String key, final T oldValue, + final T newValue) { + if (!Objects.equals(oldValue, newValue)) { + LOG.info("Updated configuration for key '{}' from {} to {}", key, oldValue, newValue); + } + } + /** * Return this 
instance's configured value for {@value #SPLIT_ENABLED_KEY}. */ public boolean isSplitEnabled() { - return splitEnabled; + return normalizerConfiguration.isSplitEnabled(); } /** * Return this instance's configured value for {@value #MERGE_ENABLED_KEY}. */ public boolean isMergeEnabled() { - return mergeEnabled; + return normalizerConfiguration.isMergeEnabled(); } /** * Return this instance's configured value for {@value #MIN_REGION_COUNT_KEY}. */ public int getMinRegionCount() { - return minRegionCount; + return normalizerConfiguration.getMinRegionCount(); } /** * Return this instance's configured value for {@value #MERGE_MIN_REGION_AGE_DAYS_KEY}. */ public Period getMergeMinRegionAge() { - return mergeMinRegionAge; + return normalizerConfiguration.getMergeMinRegionAge(); } /** * Return this instance's configured value for {@value #MERGE_MIN_REGION_SIZE_MB_KEY}. */ public long getMergeMinRegionSizeMb() { - return mergeMinRegionSizeMb; + return normalizerConfiguration.getMergeMinRegionSizeMb(); } @Override @@ -292,8 +293,15 @@ private double getAverageRegionSizeMb(final List tableRegions) { /** * Determine if a {@link RegionInfo} should be considered for a merge operation. + *

    + * Callers beware: for safe concurrency, be sure to pass in the local instance of + * {@link NormalizerConfiguration}, don't use {@code this}'s instance. */ - private boolean skipForMerge(final RegionStates regionStates, final RegionInfo regionInfo) { + private boolean skipForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionStates regionStates, + final RegionInfo regionInfo + ) { final RegionState state = regionStates.getRegionState(regionInfo); final String name = regionInfo.getEncodedName(); return @@ -304,10 +312,10 @@ private boolean skipForMerge(final RegionStates regionStates, final RegionInfo r () -> !Objects.equals(state.getState(), RegionState.State.OPEN), "skipping merge of region {} because it is not open.", name) || logTraceReason( - () -> !isOldEnoughForMerge(regionInfo), + () -> !isOldEnoughForMerge(normalizerConfiguration, regionInfo), "skipping merge of region {} because it is not old enough.", name) || logTraceReason( - () -> !isLargeEnoughForMerge(regionInfo), + () -> !isLargeEnoughForMerge(normalizerConfiguration, regionInfo), "skipping merge region {} because it is not large enough.", name); } @@ -316,15 +324,16 @@ private boolean skipForMerge(final RegionStates regionStates, final RegionInfo r * towards target average or target region count. */ private List computeMergeNormalizationPlans(final NormalizeContext ctx) { - if (isEmpty(ctx.getTableRegions()) || ctx.getTableRegions().size() < minRegionCount) { + final NormalizerConfiguration configuration = normalizerConfiguration; + if (ctx.getTableRegions().size() < configuration.getMinRegionCount()) { LOG.debug("Table {} has {} regions, required min number of regions for normalizer to run" - + " is {}, not computing merge plans.", ctx.getTableName(), ctx.getTableRegions().size(), - minRegionCount); + + " is {}, not computing merge plans.", ctx.getTableName(), + ctx.getTableRegions().size(), configuration.getMinRegionCount()); return Collections.emptyList(); } final long avgRegionSizeMb = (long) ctx.getAverageRegionSizeMb(); - if (avgRegionSizeMb < mergeMinRegionSizeMb) { + if (avgRegionSizeMb < configuration.getMergeMinRegionSizeMb()) { return Collections.emptyList(); } LOG.debug("Computing normalization plan for table {}. average region size: {}, number of" @@ -347,7 +356,7 @@ private List computeMergeNormalizationPlans(final NormalizeCo for (current = rangeStart; current < ctx.getTableRegions().size(); current++) { final RegionInfo regionInfo = ctx.getTableRegions().get(current); final long regionSizeMb = getRegionSizeMB(regionInfo); - if (skipForMerge(ctx.getRegionStates(), regionInfo)) { + if (skipForMerge(configuration, ctx.getRegionStates(), regionInfo)) { // this region cannot participate in a range. resume the outer loop. rangeStart = Math.max(current, rangeStart + 1); break; @@ -419,18 +428,28 @@ private List computeSplitNormalizationPlans(final NormalizeCo * Return {@code true} when {@code regionInfo} has a creation date that is old * enough to be considered for a merge operation, {@code false} otherwise. 
*/ - private boolean isOldEnoughForMerge(final RegionInfo regionInfo) { + private static boolean isOldEnoughForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionInfo regionInfo + ) { final Instant currentTime = Instant.ofEpochMilli(EnvironmentEdgeManager.currentTime()); final Instant regionCreateTime = Instant.ofEpochMilli(regionInfo.getRegionId()); - return currentTime.isAfter(regionCreateTime.plus(mergeMinRegionAge)); + return currentTime.isAfter( + regionCreateTime.plus(normalizerConfiguration.getMergeMinRegionAge())); } /** * Return {@code true} when {@code regionInfo} has a size that is sufficient * to be considered for a merge operation, {@code false} otherwise. + *

    + * Callers beware: for safe concurrency, be sure to pass in the local instance of + * {@link NormalizerConfiguration}, don't use {@code this}'s instance. */ - private boolean isLargeEnoughForMerge(final RegionInfo regionInfo) { - return getRegionSizeMB(regionInfo) >= mergeMinRegionSizeMb; + private boolean isLargeEnoughForMerge( + final NormalizerConfiguration normalizerConfiguration, + final RegionInfo regionInfo + ) { + return getRegionSizeMB(regionInfo) >= normalizerConfiguration.getMergeMinRegionSizeMb(); } private static boolean logTraceReason(final BooleanSupplier predicate, final String fmtWhenTrue, @@ -442,6 +461,74 @@ private static boolean logTraceReason(final BooleanSupplier predicate, final Str return value; } + /** + * Holds the configuration values read from {@link Configuration}. Encapsulation in a POJO + * enables atomic hot-reloading of configs without locks. + */ + private static final class NormalizerConfiguration { + private final Configuration conf; + private final boolean splitEnabled; + private final boolean mergeEnabled; + private final int minRegionCount; + private final Period mergeMinRegionAge; + private final long mergeMinRegionSizeMb; + + private NormalizerConfiguration() { + conf = null; + splitEnabled = DEFAULT_SPLIT_ENABLED; + mergeEnabled = DEFAULT_MERGE_ENABLED; + minRegionCount = DEFAULT_MIN_REGION_COUNT; + mergeMinRegionAge = Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS); + mergeMinRegionSizeMb = DEFAULT_MERGE_MIN_REGION_SIZE_MB; + } + + private NormalizerConfiguration( + final Configuration conf, + final NormalizerConfiguration currentConfiguration + ) { + this.conf = conf; + splitEnabled = conf.getBoolean(SPLIT_ENABLED_KEY, DEFAULT_SPLIT_ENABLED); + mergeEnabled = conf.getBoolean(MERGE_ENABLED_KEY, DEFAULT_MERGE_ENABLED); + minRegionCount = parseMinRegionCount(conf); + mergeMinRegionAge = parseMergeMinRegionAge(conf); + mergeMinRegionSizeMb = parseMergeMinRegionSizeMb(conf); + logConfigurationUpdated(SPLIT_ENABLED_KEY, currentConfiguration.isSplitEnabled(), + splitEnabled); + logConfigurationUpdated(MERGE_ENABLED_KEY, currentConfiguration.isMergeEnabled(), + mergeEnabled); + logConfigurationUpdated(MIN_REGION_COUNT_KEY, currentConfiguration.getMinRegionCount(), + minRegionCount); + logConfigurationUpdated(MERGE_MIN_REGION_AGE_DAYS_KEY, + currentConfiguration.getMergeMinRegionAge(), mergeMinRegionAge); + logConfigurationUpdated(MERGE_MIN_REGION_SIZE_MB_KEY, + currentConfiguration.getMergeMinRegionSizeMb(), mergeMinRegionSizeMb); + } + + public Configuration getConf() { + return conf; + } + + public boolean isSplitEnabled() { + return splitEnabled; + } + + public boolean isMergeEnabled() { + return mergeEnabled; + } + + public int getMinRegionCount() { + return minRegionCount; + } + + public Period getMergeMinRegionAge() { + return mergeMinRegionAge; + } + + public long getMergeMinRegionSizeMb() { + return mergeMinRegionSizeMb; + } + } + /** * Inner class caries the state necessary to perform a single invocation of * {@link #computePlansForTable(TableName)}. 
Grabbing this data from the assignment manager diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java new file mode 100644 index 000000000000..00980233edce --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.normalizer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.RateLimiter; + +/** + * Test that configuration changes are propagated to all children. 
+ */ +@Category({ MasterTests.class, SmallTests.class}) +public class TestRegionNormalizerManagerConfigurationObserver { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionNormalizerManagerConfigurationObserver.class); + + private static final HBaseTestingUtility testUtil = new HBaseTestingUtility(); + private static final Pattern rateLimitPattern = + Pattern.compile("RateLimiter\\[stableRate=(?.+)qps]"); + + private Configuration conf; + private SimpleRegionNormalizer normalizer; + @Mock private MasterServices masterServices; + @Mock private RegionNormalizerTracker tracker; + @Mock private RegionNormalizerChore chore; + @Mock private RegionNormalizerWorkQueue queue; + private RegionNormalizerWorker worker; + private ConfigurationManager configurationManager; + + @Before + public void before() { + MockitoAnnotations.initMocks(this); + conf = testUtil.getConfiguration(); + normalizer = new SimpleRegionNormalizer(); + worker = new RegionNormalizerWorker(conf, masterServices, normalizer, queue); + final RegionNormalizerManager normalizerManager = + new RegionNormalizerManager(tracker, chore, queue, worker); + configurationManager = new ConfigurationManager(); + configurationManager.registerObserver(normalizerManager); + } + + @Test + public void test() { + assertTrue(normalizer.isMergeEnabled()); + assertEquals(3, normalizer.getMinRegionCount()); + assertEquals(1_000_000L, parseConfiguredRateLimit(worker.getRateLimiter())); + + final Configuration newConf = new Configuration(conf); + // configs on SimpleRegionNormalizer + newConf.setBoolean("hbase.normalizer.merge.enabled", false); + newConf.setInt("hbase.normalizer.min.region.count", 100); + // config on RegionNormalizerWorker + newConf.set("hbase.normalizer.throughput.max_bytes_per_sec", "12g"); + + configurationManager.notifyAllObservers(newConf); + assertFalse(normalizer.isMergeEnabled()); + assertEquals(100, normalizer.getMinRegionCount()); + assertEquals(12_884L, parseConfiguredRateLimit(worker.getRateLimiter())); + } + + /** + * The {@link RateLimiter} class does not publicly expose its currently configured rate. It does + * offer this information in the {@link RateLimiter#toString()} method. It's fragile, but parse + * this value. The alternative would be to track the value explicitly in the worker, and the + * associated coordination overhead paid at runtime. See the related note on + * {@link RegionNormalizerWorker#getRateLimiter()}. + */ + private static long parseConfiguredRateLimit(final RateLimiter rateLimiter) { + final String val = rateLimiter.toString(); + final Matcher matcher = rateLimitPattern.matcher(val); + assertTrue(matcher.matches()); + final String parsedRate = matcher.group("rate"); + return (long) Double.parseDouble(parsedRate); + } +} From 7793798d97ac038446da6f7ee3a6e0979edccac2 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 2 Nov 2020 20:12:39 +0530 Subject: [PATCH 476/769] HBASE-25218 : Add 2.3.3 to the downloads page Closes #2615 Signed-off-by: Jan Hentschel --- src/site/xdoc/downloads.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index 37ea25af5e75..bbd60e5e2e4a 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -45,24 +45,24 @@ under the License. - 2.3.2 + 2.3.3 - 2020/09/25 + 2020/11/02 - 2.3.1 vs 2.3.2 + 2.3.2 vs 2.3.3 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc) + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc) From 026d216115b7e13b1a2a0fba67f60024d0daabd3 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Mon, 2 Nov 2020 13:11:09 -0800 Subject: [PATCH 477/769] HBASE-25228 Delete dev-support/jenkins_precommit_jira_yetus.sh (#2611) Signed-off-by: Viraj Jasani Signed-off-by: Jan Hentschel --- dev-support/jenkins_precommit_jira_yetus.sh | 182 -------------------- 1 file changed, 182 deletions(-) delete mode 100755 dev-support/jenkins_precommit_jira_yetus.sh diff --git a/dev-support/jenkins_precommit_jira_yetus.sh b/dev-support/jenkins_precommit_jira_yetus.sh deleted file mode 100755 index d721ac8faba6..000000000000 --- a/dev-support/jenkins_precommit_jira_yetus.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -if [[ "true" = "${DEBUG}" ]]; then - set -x - printenv -fi - -##To set jenkins Environment Variables: -export TOOLS_HOME=/home/jenkins/tools -#export JAVA_HOME=${JAVA_HOME_HADOOP_MACHINES_HOME} -export FINDBUGS_HOME=${TOOLS_HOME}/findbugs/latest -export CLOVER_HOME=${TOOLS_HOME}/clover/latest -#export MAVEN_HOME=${MAVEN_3_0_4_HOME} -export MAVEN_HOME=/home/jenkins/tools/maven/apache-maven-3.0.5 - -#export PATH=$PATH:${JAVA_HOME}/bin:${MAVEN_HOME}/bin: -export PATH=$PATH:${MAVEN_HOME}/bin: - -YETUS_RELEASE=0.12.0 -COMPONENT=${WORKSPACE}/component -TEST_FRAMEWORK=${WORKSPACE}/test_framework - -PATCHPROCESS=${WORKSPACE}/patchprocess -if [[ -d ${PATCHPROCESS} ]]; then - echo "[WARN] patch process already existed '${PATCHPROCESS}'" - rm -rf "${PATCHPROCESS}" -fi -mkdir -p "${PATCHPROCESS}" - - -## Checking on H* machine nonsense -echo "JAVA_HOME: ${JAVA_HOME}" -ls -l "${JAVA_HOME}" || true -echo "MAVEN_HOME: ${MAVEN_HOME}" -echo "maven version:" -mvn --offline --version || true -echo "getting machine specs, find in ${BUILD_URL}/artifact/patchprocess/machine/" -mkdir "${PATCHPROCESS}/machine" -cat /proc/cpuinfo >"${PATCHPROCESS}/machine/cpuinfo" 2>&1 || true -cat /proc/meminfo >"${PATCHPROCESS}/machine/meminfo" 2>&1 || true -cat /proc/diskstats >"${PATCHPROCESS}/machine/diskstats" 2>&1 || true -cat /sys/block/sda/stat >"${PATCHPROCESS}/machine/sys-block-sda-stat" 2>&1 || true -df -h >"${PATCHPROCESS}/machine/df-h" 2>&1 || true -ps -Awwf >"${PATCHPROCESS}/machine/ps-Awwf" 2>&1 || true -ifconfig -a >"${PATCHPROCESS}/machine/ifconfig-a" 2>&1 || true -lsblk -ta >"${PATCHPROCESS}/machine/lsblk-ta" 2>&1 || true -lsblk -fa >"${PATCHPROCESS}/machine/lsblk-fa" 2>&1 || true -cat /proc/loadavg >"${PATCHPROCESS}/loadavg" 2>&1 || true -ulimit -a >"${PATCHPROCESS}/machine/ulimit-a" 2>&1 || true - -## /H* - -### Download Yetus -if [[ "true" != "${USE_YETUS_PRERELEASE}" ]]; then - if [ ! 
-d "${TEST_FRAMEWORK}/yetus-${YETUS_RELEASE}" ]; then - mkdir -p "${TEST_FRAMEWORK}" - cd "${TEST_FRAMEWORK}" || exit 1 - # clear out any cached 'use a prerelease' versions - rm -rf apache-yetus-* - - mkdir -p "${TEST_FRAMEWORK}/.gpg" - chmod -R 700 "${TEST_FRAMEWORK}/.gpg" - - curl -L --fail -o "${TEST_FRAMEWORK}/KEYS_YETUS" https://dist.apache.org/repos/dist/release/yetus/KEYS - gpg --homedir "${TEST_FRAMEWORK}/.gpg" --import "${TEST_FRAMEWORK}/KEYS_YETUS" - - ## Release - curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/apache-yetus-${YETUS_RELEASE}-bin.tar.gz" - curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/apache-yetus-${YETUS_RELEASE}-bin.tar.gz.asc" - gpg --homedir "${TEST_FRAMEWORK}/.gpg" --verify "apache-yetus-${YETUS_RELEASE}-bin.tar.gz.asc" - tar xzpf "apache-yetus-${YETUS_RELEASE}-bin.tar.gz" - fi - TESTPATCHBIN=${TEST_FRAMEWORK}/apache-yetus-${YETUS_RELEASE}/bin/test-patch - TESTPATCHLIB=${TEST_FRAMEWORK}/apache-yetus-${YETUS_RELEASE}/lib/precommit -else - prerelease_dirs=("${TEST_FRAMEWORK}/${YETUS_PRERELEASE_GITHUB/\//-}-*") - if [ ! -d "${prerelease_dirs[0]}" ]; then - mkdir -p "${TEST_FRAMEWORK}" - cd "${TEST_FRAMEWORK}" || exit - ## from github - curl -L --fail "https://api.github.com/repos/${YETUS_PRERELEASE_GITHUB}/tarball/HEAD" > yetus.tar.gz - tar xvpf yetus.tar.gz - prerelease_dirs=("${TEST_FRAMEWORK}/${YETUS_PRERELEASE_GITHUB/\//-}-*") - fi - TESTPATCHBIN=${prerelease_dirs[0]}/precommit/test-patch.sh - TESTPATCHLIB=${prerelease_dirs[0]}/precommit -fi - -if [[ "true" = "${DEBUG}" ]]; then - # DEBUG print the test framework - ls -l "${TESTPATCHBIN}" - ls -la "${TESTPATCHLIB}/test-patch.d/" - # DEBUG print the local customization - if [ -d "${COMPONENT}/dev-support/test-patch.d" ]; then - ls -la "${COMPONENT}/dev-support/test-patch.d/" - fi - YETUS_ARGS=(--debug "${YETUS_ARGS[@]}") -fi - - -if [ ! -x "${TESTPATCHBIN}" ] && [ -n "${TEST_FRAMEWORK}" ] && [ -d "${TEST_FRAMEWORK}" ]; then - echo "Something is amiss with the test framework; removing it. please re-run." - rm -rf "${TEST_FRAMEWORK}" - exit 1 -fi - -cd "${WORKSPACE}" || exit - - -# -# Yetus *always* builds with JAVA_HOME, so no need to list it. -# -# non-docker-mode JDK: -# --findbugs-home=/home/jenkins/tools/findbugs/latest \ - -# docker-mode: (openjdk 7 added for free) -# --findbugs-home=/usr \ -# --docker \ -# --multijdkdirs="/usr/lib/jvm/java-8-openjdk-amd64" \ - -if [[ "true" = "${RUN_IN_DOCKER}" ]]; then - YETUS_ARGS=( - --docker \ - "--multijdkdirs=/usr/lib/jvm/java-8-openjdk-amd64" \ - "--findbugs-home=/usr" \ - "${YETUS_ARGS[@]}" \ - ) - if [ -r "${COMPONENT}/dev-support/docker/Dockerfile" ]; then - YETUS_ARGS=("--dockerfile=${COMPONENT}/dev-support/docker/Dockerfile" "${YETUS_ARGS[@]}") - fi -else - YETUS_ARGS=("--findbugs-home=/home/jenkins/tools/findbugs/latest" "${YETUS_ARGS[@]}") -fi - -if [ -d "${COMPONENT}/dev-support/test-patch.d" ]; then - YETUS_ARGS=("--user-plugins=${COMPONENT}/dev-support/test-patch.d" "${YETUS_ARGS[@]}") -fi - -# I don't trust Yetus compat enough yet, so in prerelease mode, skip our personality. -# this should give us an incentive to update the Yetus exemplar for HBase periodically. 
-if [ -r "${COMPONENT}/dev-support/hbase-personality.sh" ] && [[ "true" != "${USE_YETUS_PRERELEASE}" ]] ; then - YETUS_ARGS=("--personality=${COMPONENT}/dev-support/hbase-personality.sh" "${YETUS_ARGS[@]}") -fi - -if [[ true == "${QUICK_HADOOPCHECK}" ]]; then - YETUS_ARGS=("--quick-hadoopcheck" "${YETUS_ARGS[@]}") -fi - -if [[ true == "${SKIP_ERRORPRONE}" ]]; then - YETUS_ARGS=("--skip-errorprone" "${YETUS_ARGS[@]}") -fi - -YETUS_ARGS=("--skip-dirs=dev-support" "${YETUS_ARGS[@]}") - -/bin/bash "${TESTPATCHBIN}" \ - "${YETUS_ARGS[@]}" \ - --patch-dir="${PATCHPROCESS}" \ - --basedir="${COMPONENT}" \ - --mvn-custom-repos \ - --whitespace-eol-ignore-list=".*/generated/.*" \ - --whitespace-tabs-ignore-list=".*/generated/.*" \ - --jira-user=HBaseQA \ - --jira-password="${JIRA_PASSWORD}" \ - "HBASE-${ISSUE_NUM}" - -find "${COMPONENT}" -name target -exec chmod -R u+w {} \; From e9400970b6360a76d0ac6cf68b34a2d60d3f5038 Mon Sep 17 00:00:00 2001 From: Sandeep Pal <50725353+sandeepvinayak@users.noreply.github.com> Date: Tue, 3 Nov 2020 07:39:23 -0800 Subject: [PATCH 478/769] HBASE-24859: Optimize in-memory representation of HBase map reduce table splits (#2609) Patch fixes the single table input format case. Signed-off-by: Bharath Vissapragada --- .../hbase/mapreduce/TableInputFormatBase.java | 35 +++++++++++++------ .../hadoop/hbase/mapreduce/TableSplit.java | 34 ++++++++++++++---- .../TestTableInputFormatScanBase.java | 17 +++++---- 3 files changed, 62 insertions(+), 24 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index e7c5bf4fb2d7..8baf85ffb4d9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -26,10 +26,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; @@ -52,6 +48,9 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.net.DNS; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -287,7 +286,7 @@ public List getSplits(JobContext context) throws IOException { * Create one InputSplit per region * * @return The list of InputSplit for all the regions - * @throws IOException + * @throws IOException throws IOException */ private List oneInputSplitPerRegion() throws IOException { RegionSizeCalculator sizeCalculator = @@ -305,7 +304,10 @@ private List oneInputSplitPerRegion() throws IOException { } List splits = new ArrayList<>(1); long regionSize = sizeCalculator.getRegionSize(regLoc.getRegion().getRegionName()); - TableSplit split = new TableSplit(tableName, scan, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. 
See HBASE-25212 + TableSplit split = new TableSplit(tableName, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); splits.add(split); @@ -345,7 +347,10 @@ private List oneInputSplitPerRegion() throws IOException { byte[] regionName = location.getRegion().getRegionName(); String encodedRegionName = location.getRegion().getEncodedName(); long regionSize = sizeCalculator.getRegionSize(regionName); - TableSplit split = new TableSplit(tableName, scan, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 + TableSplit split = new TableSplit(tableName, null, splitStart, splitStop, regionLocation, encodedRegionName, regionSize); splits.add(split); if (LOG.isDebugEnabled()) { @@ -362,7 +367,7 @@ private List oneInputSplitPerRegion() throws IOException { * @param n Number of ranges after splitting. Pass 1 means no split for the range * Pass 2 if you want to split the range in two; * @return A list of TableSplit, the size of the list is n - * @throws IllegalArgumentIOException + * @throws IllegalArgumentIOException throws IllegalArgumentIOException */ protected List createNInputSplitsUniform(InputSplit split, int n) throws IllegalArgumentIOException { @@ -409,9 +414,12 @@ protected List createNInputSplitsUniform(InputSplit split, int n) // Split Region into n chunks evenly byte[][] splitKeys = Bytes.split(startRow, endRow, true, n-1); for (int i = 0; i < splitKeys.length - 1; i++) { + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 //notice that the regionSize parameter may be not very accurate TableSplit tsplit = - new TableSplit(tableName, scan, splitKeys[i], splitKeys[i + 1], regionLocation, + new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], regionLocation, encodedRegionName, regionSize / n); res.add(tsplit); } @@ -488,7 +496,10 @@ public List calculateAutoBalancedSplits(List splits, lon } } i = j - 1; - TableSplit t = new TableSplit(tableName, scan, splitStartKey, splitEndKey, regionLocation, + // In the table input format for single table we do not need to + // store the scan object in table split because it can be memory intensive and redundant + // information to what is already stored in conf SCAN. See HBASE-25212 + TableSplit t = new TableSplit(tableName, null, splitStartKey, splitEndKey, regionLocation, encodedRegionName, totalSize); resultList.add(t); } @@ -508,7 +519,9 @@ String reverseDNS(InetAddress ipAddress) throws UnknownHostException { // reverse DNS using jndi doesn't work well with ipv6 addresses. 
ipAddressString = InetAddress.getByName(ipAddress.getHostAddress()).getHostName(); } - if (ipAddressString == null) throw new UnknownHostException("No host found for " + ipAddress); + if (ipAddressString == null) { + throw new UnknownHostException("No host found for " + ipAddress); + } hostName = Strings.domainNamePointerToHostName(ipAddressString); this.reverseDNSCacheMap.put(ipAddress, hostName); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index de42c31678ef..acce55e82ce8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -22,17 +22,16 @@ import java.io.DataOutput; import java.io.IOException; import java.util.Arrays; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A table split corresponds to a key range (low, high) and an optional scanner. @@ -40,7 +39,7 @@ */ @InterfaceAudience.Public public class TableSplit extends InputSplit -implements Writable, Comparable { + implements Writable, Comparable { /** @deprecated LOG variable would be made private. fix in hbase 3.0 */ @Deprecated public static final Logger LOG = LoggerFactory.getLogger(TableSplit.class); @@ -84,6 +83,16 @@ static Version fromCode(int code) { private byte [] endRow; private String regionLocation; private String encodedRegionName = ""; + + /** + * The scan object may be null, but its serialized form is never null or empty, + * because in that case we serialize a Scan with default values. + * Having no scanner in a TableSplit does not necessarily mean there is no scanner + * for the mapreduce job; it just means that we do not need to set it for each split. + * For example, it is not required to have a scan object for + * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the + * job conf and the scan is supposed to be the same for all the splits of the table. + */ private String scan = ""; // stores the serialized form of the Scan private long length; // Contains estimation of region size in bytes @@ -182,12 +191,23 @@ public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, * Returns a Scan object from the stored string representation. * * @return Returns a Scan object based on the stored scanner. - * @throws IOException + * @throws IOException if deserialization fails */ public Scan getScan() throws IOException { return TableMapReduceUtil.convertStringToScan(this.scan); } + /** + * Returns the serialized scan string. + * @return the scan as a string. Note that this is not the same as getScan().toString(), + * because a Scan deserialized from an empty scan string carries the default values; + * thus getScan().toString() can never be empty + */ + @InterfaceAudience.Private + public String getScanAsString() { + return this.scan; + } + /** * Returns the table name converted to a byte array. 
* @see #getTable() diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index 4b18624f9241..7855747b1664 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; - import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -86,7 +85,7 @@ public static void tearDownAfterClass() throws Exception { * Pass the key and value to reduce. */ public static class ScanMapper - extends TableMapper { + extends TableMapper { /** * Pass the key and value to reduce. @@ -99,7 +98,7 @@ public static class ScanMapper @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { if (value.size() != 2) { throw new IOException("There should be two input columns"); } @@ -123,7 +122,7 @@ public void map(ImmutableBytesWritable key, Result value, * Checks the last and first key seen against the scanner boundaries. */ public static class ScanReducer - extends Reducer { private String first = null; @@ -131,7 +130,7 @@ public static class ScanReducer protected void reduce(ImmutableBytesWritable key, Iterable values, Context context) - throws IOException ,InterruptedException { + throws IOException ,InterruptedException { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); @@ -144,7 +143,7 @@ protected void reduce(ImmutableBytesWritable key, } protected void cleanup(Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Configuration c = context.getConfiguration(); String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); @@ -249,6 +248,12 @@ protected void testNumOfSplits(int splitsPerRegion, int expectedNumOfSplits) tif.setConf(job.getConfiguration()); Assert.assertEquals(TABLE_NAME, table.getName()); List splits = tif.getSplits(job); + for (InputSplit split : splits) { + TableSplit tableSplit = (TableSplit) split; + // In table input format, we do not store the scanner at the split level + // because we use the scan object from the map-reduce job conf itself. 
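The read side of this contract, sketched for clarity: a consumer that finds no scan on the split rebuilds it from the job configuration. TableInputFormat.SCAN and TableMapReduceUtil.convertStringToScan are existing APIs; context stands in for any TaskAttemptContext:

    Configuration conf = context.getConfiguration();
    Scan scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN));
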
+ Assert.assertTrue(tableSplit.getScanAsString().isEmpty()); + } Assert.assertEquals(expectedNumOfSplits, splits.size()); } From 7d680c5c9fdbba275d600cceeef9bb2352cb7a6f Mon Sep 17 00:00:00 2001 From: niuyulin Date: Wed, 4 Nov 2020 01:49:38 +0800 Subject: [PATCH 479/769] HBASE-25210 RegionInfo.isOffline is now a duplication with RegionInfo.isSplit (#2580) Signed-off-by: Duo Zhang Signed-off-by: stack --- .../org/apache/hadoop/hbase/client/MutableRegionInfo.java | 6 ++++++ .../java/org/apache/hadoop/hbase/client/RegionInfo.java | 6 ++++++ .../org/apache/hadoop/hbase/client/RegionInfoBuilder.java | 1 + 3 files changed, 13 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 5d48991cf205..028608db614d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -254,8 +254,11 @@ public MutableRegionInfo setSplit(boolean split) { /** * @return True if this region is offline. + * @deprecated since 3.0.0 and will be removed in 4.0.0 + * @see HBASE-25210 */ @Override + @Deprecated public boolean isOffline() { return this.offLine; } @@ -273,8 +276,11 @@ public MutableRegionInfo setOffline(boolean offLine) { /** * @return True if this is a split parent region. + * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. + * @see HBASE-25210 */ @Override + @Deprecated public boolean isSplitParent() { if (!isSplit()) { return false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index d860c7681a37..d7460e9d15ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -218,12 +218,18 @@ public interface RegionInfo extends Comparable { /** * @return True if this region is offline. + * @deprecated since 3.0.0 and will be removed in 4.0.0 + * @see HBASE-25210 */ + @Deprecated boolean isOffline(); /** * @return True if this is a split parent region. + * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. + * @see HBASE-25210 */ + @Deprecated boolean isSplitParent(); /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java index a9e7806ad9d3..cbf9e4a3c219 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java @@ -100,6 +100,7 @@ public RegionInfoBuilder setSplit(boolean split) { return this; } + @Deprecated public RegionInfoBuilder setOffline(boolean offLine) { this.offLine = offLine; return this; From 6595ab677613ab5df4e5f6e2406f4030b21b14fb Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Tue, 3 Nov 2020 15:20:27 -0800 Subject: [PATCH 480/769] HBASE-25212 Optionally abort requests in progress after deciding a region should close (#2574) If hbase.regionserver.close.wait.abort is set to true, interrupt RPC handler threads holding the region close lock. 
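The close-path tuning this commit introduces can be summarized as a configuration sketch; the property names and defaults are those given in this commit message:

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.close.wait.abort", true);      // allow interrupt, then abort
    conf.setLong("hbase.regionserver.close.wait.time.ms", 60000L);     // max wait before aborting
    conf.setLong("hbase.regionserver.close.wait.interval.ms", 10000L); // interrupt retry period
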
Until requests in progress can be aborted, wait on the region close lock for a configurable interval (specified by hbase.regionserver.close.wait.time.ms, default 60000 (1 minute)). If we have failed to acquire the close lock after this interval elapses, if allowed (also specified by hbase.regionserver.close.wait.abort), abort the regionserver. We will attempt to interrupt any running handlers every hbase.regionserver.close.wait.interval.ms (default 10000 (10 seconds)) until either the close lock is acquired or we reach the maximum wait time. Define a subset of region operations as interruptible. Track threads holding the close lock transiting those operations. Set the thread interrupt status of tracked threads when trying to close the region. Use the thread interrupt status where safe to break out of request processing. Signed-off-by: Bharath Vissapragada Signed-off-by: Duo Zhang Signed-off-by: Reid Chan Signed-off-by: Viraj Jasani --- .../hadoop/hbase/regionserver/HRegion.java | 301 +++++++++++++-- .../hadoop/hbase/regionserver/Region.java | 3 +- .../hadoop/hbase/HBaseTestingUtility.java | 18 +- .../TestCacheOnWriteInSchema.java | 2 +- .../regionserver/TestFailedAppendAndSync.java | 10 +- .../hbase/regionserver/TestHRegion.java | 272 +++++++++++-- .../regionserver/TestHRegionReplayEvents.java | 2 +- .../TestHRegionWithInMemoryFlush.java | 7 +- .../regionserver/TestRegionIncrement.java | 2 +- .../regionserver/TestRegionInterrupt.java | 363 ++++++++++++++++++ .../hbase/regionserver/TestWALLockup.java | 10 +- .../regionserver/wal/AbstractTestFSWAL.java | 2 +- .../hbase/regionserver/wal/TestFSHLog.java | 2 +- .../wal/WALDurabilityTestBase.java | 12 +- 14 files changed, 911 insertions(+), 95 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 57a1e1f5de93..bca18dbcb013 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -688,7 +688,7 @@ void sawNoSuchFamily() { // Last flush time for each Store. Useful when we are flushing for each column private final ConcurrentMap lastStoreFlushTimeMap = new ConcurrentHashMap<>(); - final RegionServerServices rsServices; + protected RegionServerServices rsServices; private RegionServerAccounting rsAccounting; private long flushCheckInterval; // flushPerChanges is to prevent too many changes in memstore @@ -696,6 +696,10 @@ void sawNoSuchFamily() { private long blockingMemStoreSize; // Used to guard closes final ReentrantReadWriteLock lock; + // Used to track interruptible holders of the region lock. Currently that is only RPC handler + // threads. Boolean value in map determines if lock holder can be interrupted, normally true, + // but may be false when thread is transiting a critical section. 
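Condensed, the tracking scheme described in the comment above (a ConcurrentHashMap keyed by Thread, with a Boolean interruptibility flag) works as follows; this distills code added later in this patch rather than introducing anything new:

    // true = holder may be interrupted; false = holder is transiting a critical section
    final ConcurrentHashMap<Thread, Boolean> regionLockHolders = new ConcurrentHashMap<>();

    void interruptRegionOperations() {
      for (Map.Entry<Thread, Boolean> entry : regionLockHolders.entrySet()) {
        if (entry.getValue().booleanValue()) {
          entry.getKey().interrupt();
        }
      }
    }
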
+ final ConcurrentHashMap regionLockHolders; // Stop updates lock private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock(); @@ -788,6 +792,7 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR; this.lock = new ReentrantReadWriteLock(conf.getBoolean(FAIR_REENTRANT_CLOSE_LOCK, DEFAULT_FAIR_REENTRANT_CLOSE_LOCK)); + this.regionLockHolders = new ConcurrentHashMap<>(); this.flushCheckInterval = conf.getInt(MEMSTORE_PERIODIC_FLUSH_INTERVAL, DEFAULT_CACHE_FLUSH_INTERVAL); this.flushPerChanges = conf.getLong(MEMSTORE_FLUSH_PER_CHANGES, DEFAULT_FLUSH_PER_CHANGES); @@ -1174,7 +1179,7 @@ public HStore call() throws IOException { LOG.info("Setting FlushNonSloppyStoresFirstPolicy for the region=" + this); } } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw throwOnInterrupt(e); } catch (ExecutionException e) { throw new IOException(e.getCause()); } finally { @@ -1578,6 +1583,13 @@ public Map> close() throws IOException { */ public static final long MAX_FLUSH_PER_CHANGES = 1000000000; // 1G + public static final String CLOSE_WAIT_ABORT = "hbase.regionserver.close.wait.abort"; + public static final boolean DEFAULT_CLOSE_WAIT_ABORT = true; + public static final String CLOSE_WAIT_TIME = "hbase.regionserver.close.wait.time.ms"; + public static final long DEFAULT_CLOSE_WAIT_TIME = 60000; // 1 minute + public static final String CLOSE_WAIT_INTERVAL = "hbase.regionserver.close.wait.interval.ms"; + public static final long DEFAULT_CLOSE_WAIT_INTERVAL = 10000; // 10 seconds + public Map> close(boolean abort) throws IOException { return close(abort, false); } @@ -1679,22 +1691,103 @@ private Map> doClose(boolean abort, MonitoredTask statu } } - if (timeoutForWriteLock == null - || timeoutForWriteLock == Long.MAX_VALUE) { - // block waiting for the lock for closing - lock.writeLock().lock(); // FindBugs: Complains UL_UNRELEASED_LOCK_EXCEPTION_PATH but seems fine - } else { - try { - boolean succeed = lock.writeLock().tryLock(timeoutForWriteLock, TimeUnit.SECONDS); - if (!succeed) { - throw new IOException("Failed to get write lock when closing region"); + // Set the closing flag + // From this point new arrivals at the region lock will get NSRE. + + this.closing.set(true); + LOG.info("Closing region {}", this); + + // Acquire the close lock + + // The configuration parameter CLOSE_WAIT_ABORT is overloaded to enable both + // the new regionserver abort condition and interrupts for running requests. + // If CLOSE_WAIT_ABORT is not enabled there is no change from earlier behavior, + // we will not attempt to interrupt threads servicing requests nor crash out + // the regionserver if something remains stubborn. + + final boolean canAbort = conf.getBoolean(CLOSE_WAIT_ABORT, DEFAULT_CLOSE_WAIT_ABORT); + boolean useTimedWait = false; + if (timeoutForWriteLock != null && timeoutForWriteLock != Long.MAX_VALUE) { + // convert legacy use of timeoutForWriteLock in seconds to new use in millis + timeoutForWriteLock = TimeUnit.SECONDS.toMillis(timeoutForWriteLock); + useTimedWait = true; + } else if (canAbort) { + timeoutForWriteLock = conf.getLong(CLOSE_WAIT_TIME, DEFAULT_CLOSE_WAIT_TIME); + useTimedWait = true; + } + if (LOG.isDebugEnabled()) { + LOG.debug((useTimedWait ? 
"Time limited wait" : "Waiting without time limit") + + " for close lock on " + this); + } + final long closeWaitInterval = conf.getLong(CLOSE_WAIT_INTERVAL, DEFAULT_CLOSE_WAIT_INTERVAL); + long elapsedWaitTime = 0; + if (useTimedWait) { + // Sanity check configuration + long remainingWaitTime = timeoutForWriteLock; + if (remainingWaitTime < closeWaitInterval) { + LOG.warn("Time limit for close wait of " + timeoutForWriteLock + + " ms is less than the configured lock acquisition wait interval " + + closeWaitInterval + " ms, using wait interval as time limit"); + remainingWaitTime = closeWaitInterval; + } + boolean acquired = false; + do { + long start = EnvironmentEdgeManager.currentTime(); + try { + acquired = lock.writeLock().tryLock(Math.min(remainingWaitTime, closeWaitInterval), + TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + // Interrupted waiting for close lock. More likely the server is shutting down, not + // normal operation, so aborting upon interrupt while waiting on this lock would not + // provide much value. Throw an IOE (as IIOE) like we would in the case where we + // fail to acquire the lock. + String msg = "Interrupted while waiting for close lock on " + this; + LOG.warn(msg, e); + throw (InterruptedIOException) new InterruptedIOException(msg).initCause(e); + } + long elapsed = EnvironmentEdgeManager.currentTime() - start; + elapsedWaitTime += elapsed; + remainingWaitTime -= elapsed; + if (canAbort && !acquired && remainingWaitTime > 0) { + // Before we loop to wait again, interrupt all region operations that might + // still be in progress, to encourage them to break out of waiting states or + // inner loops, throw an exception to clients, and release the read lock via + // endRegionOperation. + if (LOG.isDebugEnabled()) { + LOG.debug("Interrupting region operations after waiting for close lock for " + + elapsedWaitTime + " ms on " + this + ", " + remainingWaitTime + + " ms remaining"); + } + interruptRegionOperations(); } - } catch (InterruptedException e) { - throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } while (!acquired && remainingWaitTime > 0); + + // If we fail to acquire the lock, trigger an abort if we can; otherwise throw an IOE + // to let the caller know we could not proceed with the close. 
+ if (!acquired) { + String msg = "Failed to acquire close lock on " + this + " after waiting " + + elapsedWaitTime + " ms"; + LOG.error(msg); + if (canAbort) { + // If we failed to acquire the write lock, abort the server + rsServices.abort(msg, null); + } + throw new IOException(msg); } + + } else { + + long start = EnvironmentEdgeManager.currentTime(); + lock.writeLock().lock(); + elapsedWaitTime = EnvironmentEdgeManager.currentTime() - start; + } - this.closing.set(true); - LOG.info("Closing region {}", this); + + if (LOG.isDebugEnabled()) { + LOG.debug("Acquired close lock on " + this + " after waiting " + + elapsedWaitTime + " ms"); + } + status.setStatus("Disabling writes for close"); try { if (this.isClosed()) { @@ -1782,7 +1875,7 @@ public Pair> call() throws IOException { familyFiles.addAll(storeFiles.getSecond()); } } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw throwOnInterrupt(e); } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof IOException) { @@ -4549,6 +4642,11 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { MiniBatchOperationInProgress miniBatchOp = null; /** Keep track of the locks we hold so we can release them in finally clause */ List acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.size()); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + try { // STEP 1. Try to acquire as many locks as we can and build mini-batch of operations with // locked rows @@ -4562,20 +4660,31 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { return; } + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. Do it before we take the lock and disable interrupts for + // the WAL append. + checkInterrupt(); + lock(this.updatesLock.readLock(), miniBatchOp.getReadyToWriteCount()); locked = true; + // From this point until memstore update this operation should not be interrupted. + disableInterrupts(); + // STEP 2. Update mini batch of all operations in progress with LATEST_TIMESTAMP timestamp // We should record the timestamp only after we have acquired the rowLock, // otherwise, newer puts/deletes/increment/append are not guaranteed to have a newer // timestamp + long now = EnvironmentEdgeManager.currentTime(); batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks); // STEP 3. Build WAL edit + List> walEdits = batchOp.buildWALEdits(miniBatchOp); // STEP 4. Append the WALEdits to WAL and sync. + for(Iterator> it = walEdits.iterator(); it.hasNext();) { Pair nonceKeyWALEditPair = it.next(); walEdit = nonceKeyWALEditPair.getSecond(); @@ -4611,6 +4720,8 @@ private void doMiniBatchMutate(BatchOperation batchOp) throws IOException { } releaseRowLocks(acquiredRowLocks); + enableInterrupts(); + final int finalLastIndexExclusive = miniBatchOp != null ? 
miniBatchOp.getLastIndexExclusive() : batchOp.size(); final boolean finalSuccess = success; @@ -6588,13 +6699,12 @@ protected RowLock getRowLockInternal(byte[] row, boolean readLock, final RowLock success = true; return result; } catch (InterruptedException ie) { - LOG.warn("Thread interrupted waiting for lock on row: {}, in region {}", rowKey, - getRegionInfo().getRegionNameAsString()); - InterruptedIOException iie = new InterruptedIOException(); - iie.initCause(ie); + if (LOG.isDebugEnabled()) { + LOG.debug("Thread interrupted waiting for lock on row: {}, in region {}", rowKey, + getRegionInfo().getRegionNameAsString()); + } TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock"); - Thread.currentThread().interrupt(); - throw iie; + throw throwOnInterrupt(ie); } catch (Error error) { // The maximum lock count for read lock is 64K (hardcoded), when this maximum count // is reached, it will throw out an Error. This Error needs to be caught so it can @@ -7286,6 +7396,10 @@ private boolean populateResult(List results, KeyValueHeap heap, // Scanning between column families and thus the scope is between cells LimitScope limitScope = LimitScope.BETWEEN_CELLS; do { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + // We want to maintain any progress that is made towards the limits while scanning across // different column families. To do this, we toggle the keep progress flag on during calls // to the StoreScanner to ensure that any progress made thus far is not wiped away. @@ -7384,6 +7498,10 @@ private boolean nextInternal(List results, ScannerContext scannerContext) } } + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + // Let's see what we have in the storeHeap. Cell current = this.storeHeap.peek(); @@ -7464,6 +7582,10 @@ private boolean nextInternal(List results, ScannerContext scannerContext) return true; } + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + Cell nextKv = this.storeHeap.peek(); shouldStop = shouldStop(nextKv); // save that the row was empty before filters applied to it. @@ -7623,6 +7745,9 @@ protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws Cell next; while ((next = this.storeHeap.peek()) != null && CellUtil.matchingRows(next, curRowCell)) { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); this.storeHeap.next(MOCKED_LIST); } resetFilters(); @@ -8288,6 +8413,11 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, // when it assigns the edit a sequencedid (A.K.A the mvcc write number). WriteEntry writeEntry = null; MemStoreSizing memstoreAccounting = new NonThreadSafeMemStoreSizing(); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + checkInterrupt(); + try { boolean success = false; try { @@ -8303,9 +8433,19 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, prevRowLock = rowLock; } } + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. Do it before we take the lock and disable interrupts for + // the WAL append. + checkInterrupt(); + // STEP 3. Region lock lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 
1 : acquiredRowLocks.size()); locked = true; + + // From this point until memstore update this operation should not be interrupted. + disableInterrupts(); + long now = EnvironmentEdgeManager.currentTime(); // STEP 4. Let the processor scan the rows, generate mutations and add waledits doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout); @@ -8371,6 +8511,8 @@ public void processRowsWithLocks(RowProcessor processor, long timeout, } // release locks if some were acquired but another timed out releaseRowLocks(acquiredRowLocks); + + enableInterrupts(); } // 12. Run post-process hook @@ -8433,6 +8575,8 @@ public Void call() throws IOException { rowProcessorExecutor.execute(task); try { task.get(timeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException ie) { + throw throwOnInterrupt(ie); } catch (TimeoutException te) { String row = processor.getRowsToLock().isEmpty() ? "" : " on row(s):" + Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "..."; @@ -8528,11 +8672,6 @@ private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, Listcells using comparator */ @@ -8558,7 +8697,7 @@ private static List sort(List cells, final CellComparator comparator (2 * ClassSize.ATOMIC_BOOLEAN) + // closed, closing (3 * ClassSize.ATOMIC_LONG) + // numPutsWithoutWAL, dataInMemoryWithoutWAL, // compactionsFailed - (2 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints + (3 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints, regionLockHolders WriteState.HEAP_SIZE + // writestate ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + // stores (2 * ClassSize.REENTRANT_LOCK) + // lock, updatesLock @@ -8730,12 +8869,22 @@ public void startRegionOperation() throws IOException { @Override public void startRegionOperation(Operation op) throws IOException { + boolean isInterruptableOp = false; switch (op) { - case GET: // read operations + case GET: // interruptible read operations case SCAN: + isInterruptableOp = true; checkReadsEnabled(); break; - default: + case INCREMENT: // interruptible write operations + case APPEND: + case PUT: + case DELETE: + case BATCH_MUTATE: + case CHECK_AND_MUTATE: + isInterruptableOp = true; + break; + default: // all others break; } if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION @@ -8748,6 +8897,12 @@ public void startRegionOperation(Operation op) throws IOException { throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } lock(lock.readLock()); + // Update regionLockHolders ONLY for any startRegionOperation call that is invoked from + // an RPC handler + Thread thisThread = Thread.currentThread(); + if (isInterruptableOp) { + regionLockHolders.put(thisThread, true); + } if (this.closed.get()) { lock.readLock().unlock(); throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); @@ -8762,6 +8917,11 @@ public void startRegionOperation(Operation op) throws IOException { coprocessorHost.postStartRegionOperation(op); } } catch (Exception e) { + if (isInterruptableOp) { + // would be harmless to remove what we didn't add but we know by 'isInterruptableOp' + // if we added this thread to regionLockHolders + regionLockHolders.remove(thisThread); + } lock.readLock().unlock(); throw new IOException(e); } @@ -8777,6 +8937,8 @@ public void closeRegionOperation(Operation operation) throws IOException { if (operation == Operation.SNAPSHOT) { stores.values().forEach(HStore::postSnapshotOperation); } + 
Thread thisThread = Thread.currentThread(); + regionLockHolders.remove(thisThread); lock.readLock().unlock(); if (coprocessorHost != null) { coprocessorHost.postCloseRegionOperation(operation); @@ -8792,8 +8954,7 @@ public void closeRegionOperation(Operation operation) throws IOException { * @throws RegionTooBusyException if failed to get the lock in time * @throws InterruptedIOException if interrupted while waiting for a lock */ - private void startBulkRegionOperation(boolean writeLockNeeded) - throws NotServingRegionException, RegionTooBusyException, InterruptedIOException { + private void startBulkRegionOperation(boolean writeLockNeeded) throws IOException { if (this.closing.get()) { throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } @@ -8804,6 +8965,7 @@ private void startBulkRegionOperation(boolean writeLockNeeded) else lock.readLock().unlock(); throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); } + regionLockHolders.put(Thread.currentThread(), true); } /** @@ -8811,6 +8973,7 @@ private void startBulkRegionOperation(boolean writeLockNeeded) * to the try block of #startRegionOperation */ private void closeBulkRegionOperation(){ + regionLockHolders.remove(Thread.currentThread()); if (lock.writeLock().isHeldByCurrentThread()) lock.writeLock().unlock(); else lock.readLock().unlock(); } @@ -8841,7 +9004,7 @@ private void recordMutationWithoutWal(final Map> familyMap) dataInMemoryWithoutWAL.add(mutationSize); } - private void lock(final Lock lock) throws RegionTooBusyException, InterruptedIOException { + private void lock(final Lock lock) throws IOException { lock(lock, 1); } @@ -8850,8 +9013,7 @@ private void lock(final Lock lock) throws RegionTooBusyException, InterruptedIOE * if failed to get the lock in time. Throw InterruptedIOException * if interrupted while waiting for the lock. */ - private void lock(final Lock lock, final int multiplier) - throws RegionTooBusyException, InterruptedIOException { + private void lock(final Lock lock, final int multiplier) throws IOException { try { final long waitTime = Math.min(maxBusyWaitDuration, busyWaitDuration * Math.min(multiplier, maxBusyWaitMultiplier)); @@ -8869,10 +9031,10 @@ private void lock(final Lock lock, final int multiplier) throw rtbe; } } catch (InterruptedException ie) { - LOG.info("Interrupted while waiting for a lock in region {}", this); - InterruptedIOException iie = new InterruptedIOException(); - iie.initCause(ie); - throw iie; + if (LOG.isDebugEnabled()) { + LOG.debug("Interrupted while waiting for a lock in region {}", this); + } + throw throwOnInterrupt(ie); } } @@ -9000,6 +9162,67 @@ public long getReadPoint() { return getReadPoint(IsolationLevel.READ_COMMITTED); } + /** + * If a handler thread is eligible for interrupt, make it ineligible. Should be paired + * with {@link #enableInterrupts()}. + */ + protected void disableInterrupts() { + regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> false); + } + + /** + * If a handler thread was made ineligible for interrupt via {@link #disableInterrupts()}, + * make it eligible again. No-op if interrupts are already enabled. + */ + protected void enableInterrupts() { + regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> true); + } + + /** + * Interrupt any region operations that have acquired the region lock via + * {@link #startRegionOperation(org.apache.hadoop.hbase.regionserver.Region.Operation)}, + * or {@link #startBulkRegionOperation(boolean)}. 
+ */ + private void interruptRegionOperations() { + for (Map.Entry entry: regionLockHolders.entrySet()) { + // An entry in this map will have a boolean value indicating if it is currently + // eligible for interrupt; if so, we should interrupt it. + if (entry.getValue().booleanValue()) { + entry.getKey().interrupt(); + } + } + } + + /** + * Check thread interrupt status and throw an exception if interrupted. + * @throws NotServingRegionException if region is closing + * @throws InterruptedIOException if interrupted but region is not closing + */ + // Package scope for tests + void checkInterrupt() throws NotServingRegionException, InterruptedIOException { + if (Thread.interrupted()) { + if (this.closing.get()) { + throw new NotServingRegionException( + getRegionInfo().getRegionNameAsString() + " is closing"); + } + throw new InterruptedIOException(); + } + } + + /** + * Throw the correct exception upon interrupt + * @param t cause + */ + // Package scope for tests + IOException throwOnInterrupt(Throwable t) { + if (this.closing.get()) { + return (NotServingRegionException) new NotServingRegionException( + getRegionInfo().getRegionNameAsString() + " is closing") + .initCause(t); + } + return (InterruptedIOException) new InterruptedIOException().initCause(t); + } + /** * {@inheritDoc} */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 79df0013e087..900e5711415e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -199,7 +199,8 @@ public interface Region extends ConfigurationObserver { */ enum Operation { ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE, - REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT, COMPACT_SWITCH + REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT, COMPACT_SWITCH, + CHECK_AND_MUTATE } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index cb2e9e928931..29e888372908 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1979,14 +1979,15 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws /** * Create an HRegion that writes to the local tmp dirs with specified wal * @param info regioninfo + * @param conf configuration * @param desc table descriptor * @param wal wal for this region. * @return created hregion * @throws IOException */ - public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal) - throws IOException { - return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal); + public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc, + WAL wal) throws IOException { + return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal); } /** @@ -2000,14 +2001,15 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal * @throws IOException */ public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - boolean isReadOnly, Durability durability, WAL wal, byte[]... 
families) throws IOException { - return createLocalHRegionWithInMemoryFlags(tableName,startKey, stopKey, isReadOnly, + Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families) + throws IOException { + return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly, durability, wal, null, families); } public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey, - byte[] stopKey, boolean isReadOnly, Durability durability, WAL wal, boolean[] compactedMemStore, - byte[]... families) throws IOException { + byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, + boolean[] compactedMemStore, byte[]... families) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); builder.setReadOnly(isReadOnly); int i = 0; @@ -2027,7 +2029,7 @@ public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] s builder.setDurability(durability); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build(); - return createLocalHRegion(info, builder.build(), wal); + return createLocalHRegion(info, conf, builder.build(), wal); } // diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 3c9155371199..60ca5b3896b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -181,7 +181,7 @@ public void setUp() throws IOException { RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); walFactory = new WALFactory(conf, id); - region = TEST_UTIL.createLocalHRegion(info, htd, walFactory.getWAL(info)); + region = TEST_UTIL.createLocalHRegion(info, conf, htd, walFactory.getWAL(info)); region.setBlockCache(BlockCacheFactory.createBlockCache(conf)); store = new HStore(region, hcd, conf, false); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java index fdf96dab87fc..dab82144f04a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java @@ -203,7 +203,7 @@ public void testLockupAroundBadAssignSync() throws IOException { boolean threwOnAppend = false; boolean threwOnBoth = false; - HRegion region = initHRegion(tableName, null, null, dodgyWAL); + HRegion region = initHRegion(tableName, null, null, CONF, dodgyWAL); try { // Get some random bytes. byte[] value = Bytes.toBytes(getName()); @@ -316,11 +316,11 @@ public void testLockupAroundBadAssignSync() throws IOException { * @return A region on which you must call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. 
*/ - public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, WAL wal) - throws IOException { + public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + Configuration conf, WAL wal) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, false, Durability.SYNC_WAL, - wal, COLUMN_FAMILY_BYTES); + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, + Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index bbc73e3bda59..da3f2204ddd3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -30,6 +30,8 @@ import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -136,6 +138,7 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.regionserver.HRegion.MutationBatchOperation; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; +import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.Region.RowLock; import org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; @@ -354,7 +357,7 @@ public void sync(long txid) throws IOException { Path rootDir = new Path(dir + "testMemstoreSnapshotSize"); MyFaultyFSLog faultyLog = new MyFaultyFSLog(fs, rootDir, "testMemstoreSnapshotSize", CONF); faultyLog.init(); - region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, faultyLog, + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, faultyLog, COLUMN_FAMILY_BYTES); HStore store = region.getStore(COLUMN_FAMILY_BYTES); @@ -401,8 +404,8 @@ public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOExcep Path rootDir = new Path(dir + testName); FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF); hLog.init(); - region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, - COLUMN_FAMILY_BYTES); + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, hLog, + COLUMN_FAMILY_BYTES); HStore store = region.getStore(COLUMN_FAMILY_BYTES); assertEquals(0, region.getMemStoreDataSize()); @@ -500,7 +503,7 @@ public Object run() throws Exception { HRegion region = null; try { // Initialize region - region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal, + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); @@ -565,7 +568,7 @@ public Object run() throws Exception { HRegion region = null; try { // Initialize region - region = initHRegion(tableName, null, null, false, + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, wal, 
COLUMN_FAMILY_BYTES); long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); @@ -1055,7 +1058,7 @@ public void testFlushMarkers() throws Exception { final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build()); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family); + HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -1260,7 +1263,7 @@ public long getSyncedLength() { CommonFSUtils.getRootDir(walConf), method, walConf); wal.init(); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family); + HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); int i = 0; Put put = new Put(Bytes.toBytes(i)); put.setDurability(Durability.SKIP_WAL); // have to skip mocked wal @@ -1291,7 +1294,7 @@ public long getSyncedLength() { method, walConf); wal.init(); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family); + HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); region.put(put); // 3. Test case where ABORT_FLUSH will throw exception. // Even if ABORT_FLUSH throws exception, we should not fail with IOE, but continue with @@ -3240,7 +3243,7 @@ public void testDataInMemoryWithoutWAL() throws IOException { hLog.init(); // This chunk creation is done throughout the code base. Do we want to move it into core? // It is missing from this test. W/o it we NPE. - region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); Cell originalCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) @@ -3513,7 +3516,7 @@ public void testGetWithFilter() throws IOException, InterruptedException { RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log"); final WAL wal = HBaseTestingUtility.createWal(TEST_UTIL.getConfiguration(), logDir, info); - this.region = TEST_UTIL.createLocalHRegion(info, tableDescriptor, wal); + this.region = TEST_UTIL.createLocalHRegion(info, CONF, tableDescriptor, wal); // Put 4 version to memstore long ts = 0; @@ -5405,7 +5408,7 @@ private void durabilityTest(String method, Durability tableDurability, final WALFactory wals = new WALFactory(walConf, HBaseTestingUtility.getRandomUUID().toString()); final WAL wal = spy(wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build())); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, tableDurability, wal, + HConstants.EMPTY_END_ROW, CONF, false, tableDurability, wal, new byte[][] { family }); Put put = new Put(Bytes.toBytes("r1")); @@ -5772,7 +5775,7 @@ protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopK RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build(); final WAL wal = HBaseTestingUtility.createWal(conf, logDir, hri); - return initHRegion(tableName, startKey, stopKey, isReadOnly, Durability.SYNC_WAL, wal, + return initHRegion(tableName, startKey, stopKey, conf, isReadOnly, Durability.SYNC_WAL, wal, families); } @@ -5781,11 
+5784,12 @@ protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopK * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. */ public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException { + Configuration conf, boolean isReadOnly, Durability durability, WAL wal, + byte[]... families) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, - isReadOnly, durability, wal, families); + conf, isReadOnly, durability, wal, families); } /** @@ -6052,9 +6056,9 @@ public void testReverseScanner_smaller_blocksize() throws IOException { byte[] col1 = Bytes.toBytes("col1"); byte[] col2 = Bytes.toBytes("col2"); long ts = 1; - HBaseConfiguration config = new HBaseConfiguration(); - config.setInt("test.block.size", 1); - this.region = initHRegion(tableName, method, config, families); + Configuration conf = new Configuration(CONF); + conf.setInt("test.block.size", 1); + this.region = initHRegion(tableName, method, conf, families); KeyValue kv1 = new KeyValue(rowA, cf, col1, ts, KeyValue.Type.Put, null); KeyValue kv2 = new KeyValue(rowB, cf, col1, ts, KeyValue.Type.Put, null); KeyValue kv3 = new KeyValue(rowC, cf, col1, ts, KeyValue.Type.Put, null); @@ -6132,7 +6136,7 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() byte[][] families = { cf1, cf2, cf3 }; byte[] col = Bytes.toBytes("C"); long ts = 1; - HBaseConfiguration conf = new HBaseConfiguration(); + Configuration conf = new Configuration(CONF); // disable compactions in this test. conf.setInt("hbase.hstore.compactionThreshold", 10000); this.region = initHRegion(tableName, method, conf, families); @@ -6294,7 +6298,7 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() byte[][] families = { cf1, cf2, cf3, cf4 }; byte[] col = Bytes.toBytes("C"); long ts = 1; - HBaseConfiguration conf = new HBaseConfiguration(); + Configuration conf = new Configuration(CONF); // disable compactions in this test. 
conf.setInt("hbase.hstore.compactionThreshold", 10000); this.region = initHRegion(tableName, method, conf, families); @@ -6360,7 +6364,7 @@ public void testReverseScanner_StackOverflow() throws IOException { byte[] cf1 = Bytes.toBytes("CF1"); byte[][] families = {cf1}; byte[] col = Bytes.toBytes("C"); - HBaseConfiguration conf = new HBaseConfiguration(); + Configuration conf = new Configuration(CONF); this.region = initHRegion(tableName, method, conf, families); // setup with one storefile and one memstore, to create scanner and get an earlier readPt Put put = new Put(Bytes.toBytes("19998")); @@ -6409,8 +6413,7 @@ public void testReverseScanShouldNotScanMemstoreIfReadPtLesser() throws Exceptio byte[] cf1 = Bytes.toBytes("CF1"); byte[][] families = { cf1 }; byte[] col = Bytes.toBytes("C"); - HBaseConfiguration conf = new HBaseConfiguration(); - this.region = initHRegion(tableName, method, conf, families); + this.region = initHRegion(tableName, method, CONF, families); // setup with one storefile and one memstore, to create scanner and get an earlier readPt Put put = new Put(Bytes.toBytes("19996")); put.addColumn(cf1, col, Bytes.toBytes("val")); @@ -6462,8 +6465,7 @@ public void testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception { byte[][] families = { cf1 }; byte[] col = Bytes.toBytes("C"); - HBaseConfiguration conf = new HBaseConfiguration(); - this.region = initHRegion(tableName, method, conf, families); + this.region = initHRegion(tableName, method, CONF, families); Put put = new Put(Bytes.toBytes("199996")); put.addColumn(cf1, col, Bytes.toBytes("val")); @@ -7364,4 +7366,226 @@ protected List doCompaction(CompactionRequestImpl cr, return super.doCompaction(cr, filesToCompact, user, compactionStartTime, newFiles); } } + + @Test + public void testCloseNoInterrupt() throws Exception { + byte[] cf1 = Bytes.toBytes("CF1"); + byte[][] families = { cf1 }; + final int SLEEP_TIME = 10 * 1000; + + Configuration conf = new Configuration(CONF); + // Disable close thread interrupt and server abort behavior + conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, false); + conf.setInt(HRegion.CLOSE_WAIT_INTERVAL, 1000); + region = initHRegion(tableName, method, conf, families); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean holderInterrupted = new AtomicBoolean(); + Thread holder = new Thread(new Runnable() { + @Override + public void run() { + try { + LOG.info("Starting region operation holder"); + region.startRegionOperation(Operation.SCAN); + latch.countDown(); + try { + Thread.sleep(SLEEP_TIME); + } catch (InterruptedException e) { + LOG.info("Interrupted"); + holderInterrupted.set(true); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + region.closeRegionOperation(); + } catch (IOException e) { + } + LOG.info("Stopped region operation holder"); + } + } + }); + + holder.start(); + latch.await(); + region.close(); + region = null; + holder.join(); + + assertFalse("Region lock holder should not have been interrupted", holderInterrupted.get()); + } + + @Test + public void testCloseInterrupt() throws Exception { + byte[] cf1 = Bytes.toBytes("CF1"); + byte[][] families = { cf1 }; + final int SLEEP_TIME = 10 * 1000; + + Configuration conf = new Configuration(CONF); + // Enable close thread interrupt and server abort behavior + conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); + // Speed up the unit test, no need to wait default 10 seconds. 
+ conf.setInt(HRegion.CLOSE_WAIT_INTERVAL, 1000); + region = initHRegion(tableName, method, conf, families); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean holderInterrupted = new AtomicBoolean(); + Thread holder = new Thread(new Runnable() { + @Override + public void run() { + try { + LOG.info("Starting region operation holder"); + region.startRegionOperation(Operation.SCAN); + latch.countDown(); + try { + Thread.sleep(SLEEP_TIME); + } catch (InterruptedException e) { + LOG.info("Interrupted"); + holderInterrupted.set(true); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + region.closeRegionOperation(); + } catch (IOException e) { + } + LOG.info("Stopped region operation holder"); + } + } + }); + + holder.start(); + latch.await(); + region.close(); + region = null; + holder.join(); + + assertTrue("Region lock holder was not interrupted", holderInterrupted.get()); + } + + @Test + public void testCloseAbort() throws Exception { + byte[] cf1 = Bytes.toBytes("CF1"); + byte[][] families = { cf1 }; + final int SLEEP_TIME = 10 * 1000; + + Configuration conf = new Configuration(CONF); + // Enable close thread interrupt and server abort behavior. + conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); + // Set the abort interval to a fraction of sleep time so we are guaranteed to be aborted. + conf.setInt(HRegion.CLOSE_WAIT_TIME, SLEEP_TIME / 2); + // Set the wait interval to a fraction of sleep time so we are guaranteed to be interrupted. + conf.setInt(HRegion.CLOSE_WAIT_INTERVAL, SLEEP_TIME / 4); + region = initHRegion(tableName, method, conf, families); + RegionServerServices rsServices = mock(RegionServerServices.class); + when(rsServices.getServerName()).thenReturn(ServerName.valueOf("localhost", 1000, 1000)); + region.rsServices = rsServices; + + final CountDownLatch latch = new CountDownLatch(1); + Thread holder = new Thread(new Runnable() { + @Override + public void run() { + try { + LOG.info("Starting region operation holder"); + region.startRegionOperation(Operation.SCAN); + latch.countDown(); + // Hold the lock for SLEEP_TIME seconds no matter how many times we are interrupted. + int timeRemaining = SLEEP_TIME; + while (timeRemaining > 0) { + long start = EnvironmentEdgeManager.currentTime(); + try { + Thread.sleep(timeRemaining); + } catch (InterruptedException e) { + LOG.info("Interrupted"); + } + long end = EnvironmentEdgeManager.currentTime(); + timeRemaining -= end - start; + if (timeRemaining < 0) { + timeRemaining = 0; + } + if (timeRemaining > 0) { + LOG.info("Sleeping again, remaining time " + timeRemaining + " ms"); + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + region.closeRegionOperation(); + } catch (IOException e) { + } + LOG.info("Stopped region operation holder"); + } + } + }); + + holder.start(); + latch.await(); + try { + region.close(); + } catch (IOException e) { + LOG.info("Caught expected exception", e); + } + region = null; + holder.join(); + + // Verify the region tried to abort the server + verify(rsServices, atLeast(1)).abort(anyString(),any()); + } + + @Test + public void testInterruptProtection() throws Exception { + byte[] cf1 = Bytes.toBytes("CF1"); + byte[][] families = { cf1 }; + final int SLEEP_TIME = 10 * 1000; + + Configuration conf = new Configuration(CONF); + // Enable close thread interrupt and server abort behavior. 
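The pattern the test whose setup resumes below exercises, in miniature: a handler opts out of interrupts around a critical section and restores eligibility afterwards. disableInterrupts() and enableInterrupts() are the package-visible hooks added to HRegion by this patch:

    region.startRegionOperation(Operation.SCAN);
    region.disableInterrupts();    // interruptRegionOperations() now skips this thread
    try {
      // work that must not observe Thread.interrupt(), e.g. a WAL append
    } finally {
      region.enableInterrupts();   // eligible for interrupt again
      region.closeRegionOperation();
    }
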
+ conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); + conf.setInt(HRegion.CLOSE_WAIT_INTERVAL, 1000); + region = initHRegion(tableName, method, conf, families); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean holderInterrupted = new AtomicBoolean(); + Thread holder = new Thread(new Runnable() { + @Override + public void run() { + try { + LOG.info("Starting region operation holder"); + region.startRegionOperation(Operation.SCAN); + LOG.info("Protecting against interrupts"); + region.disableInterrupts(); + try { + latch.countDown(); + try { + Thread.sleep(SLEEP_TIME); + } catch (InterruptedException e) { + LOG.info("Interrupted"); + holderInterrupted.set(true); + } + } finally { + region.enableInterrupts(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + region.closeRegionOperation(); + } catch (IOException e) { + } + LOG.info("Stopped region operation holder"); + } + } + }); + + holder.start(); + latch.await(); + region.close(); + region = null; + holder.join(); + + assertFalse("Region lock holder should not have been interrupted", holderInterrupted.get()); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index a8c12052bb16..9ecdc455f5f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -1709,6 +1709,6 @@ private void putDataByReplay(HRegion region, private static HRegion initHRegion(byte[] tableName, byte[]... families) throws IOException { return TEST_UTIL.createLocalHRegion(TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, false, Durability.SYNC_WAL, null, families); + HConstants.EMPTY_END_ROW, CONF, false, Durability.SYNC_WAL, null, families); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java index 59a0741721ba..e64994aa310b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -51,7 +53,8 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion { */ @Override public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException { + Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... 
families) + throws IOException { boolean[] inMemory = new boolean[families.length]; for(int i = 0; i < inMemory.length; i++) { inMemory[i] = true; @@ -59,7 +62,7 @@ public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, - isReadOnly, durability, wal, inMemory, families); + conf, isReadOnly, durability, wal, inMemory, families); } @Override int getTestCountForTestWritesWhileScanning() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java index 710042e9c276..4792869b2f90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java @@ -87,7 +87,7 @@ private HRegion getRegion(final Configuration conf, final String tableName) thro ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegion(TableName.valueOf(tableName), HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, false, Durability.SKIP_WAL, wal, INCREMENT_BYTES); + HConstants.EMPTY_BYTE_ARRAY, conf, false, Durability.SKIP_WAL, wal, INCREMENT_BYTES); } private void closeRegion(final HRegion region) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java new file mode 100644 index 000000000000..10fa0b9af755 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java @@ -0,0 +1,363 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.filter.FilterBase; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALEdit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({RegionServerTests.class, LargeTests.class}) +public class TestRegionInterrupt { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionInterrupt.class); + + private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final Logger LOG = LoggerFactory.getLogger(TestRegionInterrupt.class); + + static final byte[] FAMILY = Bytes.toBytes("info"); + + static long sleepTime; + + @Rule + public TableNameTestRule name = new TableNameTestRule(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + conf.setClass(HConstants.REGION_IMPL, InterruptInterceptingHRegion.class, Region.class); + conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); + // Ensure the sleep interval is long enough for interrupts to occur. 
+ long waitInterval = conf.getLong(HRegion.CLOSE_WAIT_INTERVAL, + HRegion.DEFAULT_CLOSE_WAIT_INTERVAL); + sleepTime = waitInterval * 2; + // Try to bound the running time of this unit if expected actions do not take place. + conf.setLong(HRegion.CLOSE_WAIT_TIME, sleepTime * 2); + } + + @Before + public void setUp() throws Exception { + TEST_UTIL.startMiniCluster(); + } + + @After + public void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testCloseInterruptScanning() throws Exception { + final TableName tableName = name.getTableName(); + LOG.info("Creating table " + tableName); + try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { + // load some data + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + TEST_UTIL.loadTable(table, FAMILY); + final AtomicBoolean expectedExceptionCaught = new AtomicBoolean(false); + // scan the table in the background + Thread scanner = new Thread(new Runnable() { + @Override + public void run() { + Scan scan = new Scan(); + scan.addFamily(FAMILY); + scan.setFilter(new DelayingFilter()); + try { + LOG.info("Starting scan"); + try (ResultScanner rs = table.getScanner(scan)) { + Result r; + do { + r = rs.next(); + if (r != null) { + LOG.info("Scanned row " + Bytes.toStringBinary(r.getRow())); + } + } while (r != null); + } + } catch (IOException e) { + LOG.info("Scanner caught exception", e); + expectedExceptionCaught.set(true); + } finally { + LOG.info("Finished scan"); + } + } + }); + scanner.start(); + + // Wait for the filter to begin sleeping + LOG.info("Waiting for scanner to start"); + Waiter.waitFor(TEST_UTIL.getConfiguration(), 10*1000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return DelayingFilter.isSleeping(); + } + }); + + // Offline the table, this will trigger closing + LOG.info("Offlining table " + tableName); + TEST_UTIL.getAdmin().disableTable(tableName); + + // Wait for scanner termination + scanner.join(); + + // When we get here the region has closed and the table is offline + assertTrue("Region operations were not interrupted", + InterruptInterceptingHRegion.wasInterrupted()); + assertTrue("Scanner did not catch expected exception", expectedExceptionCaught.get()); + } + } + + @Test + public void testCloseInterruptMutation() throws Exception { + final TableName tableName = name.getTableName(); + final Admin admin = TEST_UTIL.getAdmin(); + // Create the test table + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(MutationDelayingCoprocessor.class.getName()) + .build(); + LOG.info("Creating table " + tableName); + admin.createTable(htd); + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + + // Insert some data in the background + LOG.info("Starting writes to table " + tableName); + final int NUM_ROWS = 100; + final AtomicBoolean expectedExceptionCaught = new AtomicBoolean(false); + Thread inserter = new Thread(new Runnable() { + @Override + public void run() { + try (BufferedMutator t = admin.getConnection().getBufferedMutator(tableName)) { + for (int i = 0; i < NUM_ROWS; i++) { + LOG.info("Writing row " + i + " to " + tableName); + byte[] value = new byte[10], row = Bytes.toBytes(Integer.toString(i)); + Bytes.random(value); + t.mutate(new Put(row).addColumn(FAMILY, HConstants.EMPTY_BYTE_ARRAY, value)); + t.flush(); + } + } catch (IOException e) { + LOG.info("Inserter caught exception", e); + expectedExceptionCaught.set(true); + } + } + }); + 
inserter.start(); + + // Wait for delayed insertion to begin + LOG.info("Waiting for mutations to start"); + Waiter.waitFor(TEST_UTIL.getConfiguration(), 10*1000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return MutationDelayingCoprocessor.isSleeping(); + } + }); + + // Offline the table, this will trigger closing + LOG.info("Offlining table " + tableName); + admin.disableTable(tableName); + + // Wait for the inserter to finish + inserter.join(); + + // When we get here the region has closed and the table is offline + assertTrue("Region operations were not interrupted", + InterruptInterceptingHRegion.wasInterrupted()); + assertTrue("Inserter did not catch expected exception", expectedExceptionCaught.get()); + + } + + public static class InterruptInterceptingHRegion extends HRegion { + + private static boolean interrupted = false; + + public static boolean wasInterrupted() { + return interrupted; + } + + public InterruptInterceptingHRegion(Path tableDir, WAL wal, FileSystem fs, + Configuration conf, RegionInfo regionInfo, TableDescriptor htd, + RegionServerServices rsServices) { + super(tableDir, wal, fs, conf, regionInfo, htd, rsServices); + } + + public InterruptInterceptingHRegion(HRegionFileSystem fs, WAL wal, Configuration conf, + TableDescriptor htd, RegionServerServices rsServices) { + super(fs, wal, conf, htd, rsServices); + } + + @Override + void checkInterrupt() throws NotServingRegionException, InterruptedIOException { + try { + super.checkInterrupt(); + } catch (NotServingRegionException | InterruptedIOException e) { + interrupted = true; + throw e; + } + } + + @Override + IOException throwOnInterrupt(Throwable t) { + interrupted = true; + return super.throwOnInterrupt(t); + } + + } + + public static class DelayingFilter extends FilterBase { + + static volatile boolean sleeping = false; + + public static boolean isSleeping() { + return sleeping; + } + + @Override + public ReturnCode filterCell(Cell v) throws IOException { + LOG.info("Starting sleep on " + v); + sleeping = true; + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + // restore interrupt status so region scanner can handle it as expected + Thread.currentThread().interrupt(); + LOG.info("Interrupted during sleep on " + v); + } finally { + LOG.info("Done sleep on " + v); + sleeping = false; + } + return ReturnCode.INCLUDE; + } + + public static DelayingFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + // Just return a new instance. + return new DelayingFilter(); + } + + } + + public static class MutationDelayingCoprocessor implements RegionCoprocessor, RegionObserver { + + static volatile boolean sleeping = false; + + public static boolean isSleeping() { + return sleeping; + } + + private void doSleep(Region.Operation op) { + LOG.info("Starting sleep for " + op); + sleeping = true; + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + // restore interrupt status so doMiniBatchMutation etc. 
can handle it as expected
+        Thread.currentThread().interrupt();
+        LOG.info("Interrupted during " + op);
+      } finally {
+        LOG.info("Done");
+        sleeping = false;
+      }
+    }
+
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
+    @Override
+    public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
+        Durability durability) throws IOException {
+      doSleep(Region.Operation.PUT);
+      RegionObserver.super.prePut(c, put, edit, durability);
+    }
+
+    @Override
+    public void preDelete(ObserverContext<RegionCoprocessorEnvironment> c, Delete delete,
+        WALEdit edit, Durability durability) throws IOException {
+      doSleep(Region.Operation.DELETE);
+      RegionObserver.super.preDelete(c, delete, edit, durability);
+    }
+
+    @Override
+    public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append)
+        throws IOException {
+      doSleep(Region.Operation.APPEND);
+      return RegionObserver.super.preAppend(c, append);
+    }
+
+    @Override
+    public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c,
+        Increment increment) throws IOException {
+      doSleep(Region.Operation.INCREMENT);
+      return RegionObserver.super.preIncrement(c, increment);
+    }
+
+  }
+
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index ce7919e36eec..e850853b60e2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -226,7 +226,7 @@ public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException {
     // There is no 'stop' once a logRoller is running.. it just dies.
     logRoller.start();
     // Now get a region and start adding in edits.
-    final HRegion region = initHRegion(tableName, null, null, dodgyWAL);
+    final HRegion region = initHRegion(tableName, null, null, CONF, dodgyWAL);
     byte [] bytes = Bytes.toBytes(getName());
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(
         Bytes.BYTES_COMPARATOR);
@@ -557,11 +557,11 @@ public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit)
    * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
    * when done.
*/ - private static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, WAL wal) - throws IOException { + private static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + Configuration conf, WAL wal) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, false, Durability.SYNC_WAL, - wal, COLUMN_FAMILY_BYTES); + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, + Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index bdc516ce724b..8a82848f3658 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -563,7 +563,7 @@ private HRegion createHoldingHRegion(Configuration conf, TableDescriptor htd, WA RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - TEST_UTIL.createLocalHRegion(hri, htd, wal).close(); + TEST_UTIL.createLocalHRegion(hri, CONF, htd, wal).close(); RegionServerServices rsServices = mock(RegionServerServices.class); when(rsServices.getServerName()).thenReturn(ServerName.valueOf("localhost:12345", 123456)); when(rsServices.getConfiguration()).thenReturn(conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java index a655bdaf7c14..e763896d8df7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java @@ -168,7 +168,7 @@ public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - final HRegion region = TEST_UTIL.createLocalHRegion(hri, htd, log); + final HRegion region = TEST_UTIL.createLocalHRegion(hri, CONF, htd, log); ExecutorService exec = Executors.newFixedThreadPool(2); // do a regular write first because of memstore size calculation. 
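Note on the idiom used by the lock-holder threads in the tests above: because the HRegion close path interrupts region-operation holders repeatedly, a thread that must keep holding the lock for a fixed duration cannot rely on a single Thread.sleep(). The sketch below isolates that deadline-based, interrupt-tolerant sleep loop. It is a minimal illustration, not HBase API: the class and method names are hypothetical, and System.currentTimeMillis() stands in for EnvironmentEdgeManager.currentTime().

// Hypothetical helper: sleep for a total of 'millis' even if interrupted part-way through.
public final class InterruptTolerantSleep {

  // Returns true if at least one interrupt was observed while sleeping.
  public static boolean sleepThroughInterrupts(long millis) {
    boolean sawInterrupt = false;
    long timeRemaining = millis;
    while (timeRemaining > 0) {
      long start = System.currentTimeMillis();
      try {
        Thread.sleep(timeRemaining);
      } catch (InterruptedException e) {
        // The interrupt status is cleared here, so the next sleep proceeds normally.
        sawInterrupt = true;
      }
      // Subtract the time actually spent, whether we slept it all or were woken early.
      timeRemaining -= System.currentTimeMillis() - start;
    }
    return sawInterrupt;
  }

  public static void main(String[] args) throws Exception {
    Thread holder = new Thread(
      () -> System.out.println("interrupted: " + sleepThroughInterrupts(2000)));
    holder.start();
    Thread.sleep(500);
    holder.interrupt(); // the holder resumes sleeping for the remaining ~1500 ms
    holder.join();
  }
}

In production code the interrupt should normally be restored with Thread.currentThread().interrupt(), as DelayingFilter and MutationDelayingCoprocessor above do; swallowing it is only appropriate when the thread's contract is to outlast interrupts, as in these tests.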
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java index 2dd948c290df..0daeb13b16ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java @@ -89,7 +89,7 @@ public void testWALDurability() throws IOException { FileSystem fs = FileSystem.get(conf); Path rootDir = new Path(dir + getName()); T wal = getWAL(fs, rootDir, getName(), conf); - HRegion region = initHRegion(tableName, null, null, wal); + HRegion region = initHRegion(tableName, null, null, conf, wal); try { resetSyncFlag(wal); assertNull(getSyncFlag(wal)); @@ -114,7 +114,7 @@ public void testWALDurability() throws IOException { conf.set(HRegion.WAL_HSYNC_CONF_KEY, "true"); fs = FileSystem.get(conf); wal = getWAL(fs, rootDir, getName(), conf); - region = initHRegion(tableName, null, null, wal); + region = initHRegion(tableName, null, null, conf, wal); try { resetSyncFlag(wal); @@ -156,11 +156,11 @@ private String getName() { * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} * when done. */ - public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, WAL wal) - throws IOException { + public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + Configuration conf, WAL wal) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, false, Durability.USE_DEFAULT, - wal, COLUMN_FAMILY_BYTES); + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, + Durability.USE_DEFAULT, wal, COLUMN_FAMILY_BYTES); } } From 5cb382295199186ccf9a01d6b8299d42c548ad4b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 4 Nov 2020 11:02:28 +0800 Subject: [PATCH 481/769] HBASE-25235 Cleanup the deprecated methods in TimeRange (#2616) Signed-off-by: Jan Hentschel Signed-off-by: stack --- .../apache/hadoop/hbase/client/Append.java | 5 +- .../org/apache/hadoop/hbase/client/Get.java | 10 +-- .../apache/hadoop/hbase/client/Increment.java | 5 +- .../org/apache/hadoop/hbase/client/Query.java | 2 +- .../org/apache/hadoop/hbase/client/Scan.java | 4 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 16 +++- .../org/apache/hadoop/hbase/io/TimeRange.java | 86 ++----------------- .../hbase/regionserver/TimeRangeTracker.java | 2 +- .../coprocessor/TestAppendTimeRange.java | 8 +- .../coprocessor/TestIncrementTimeRange.java | 4 +- .../TestSimpleTimeRangeTracker.java | 14 +-- 11 files changed, 43 insertions(+), 113 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index 922f46703eb5..41b3845fc784 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -68,7 +68,7 @@ public class Append extends Mutation { * @return this */ public Append setTimeRange(long minStamp, long maxStamp) { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -162,10 +162,9 @@ public Append addColumn(byte[] family, byte[] qualifier, byte[] value) { 
/** * Add column and value to this Append operation. - * @param cell * @return This instance */ - @SuppressWarnings("unchecked") + @Override public Append add(final Cell cell) { try { super.add(cell); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index d3b57fb461cf..0f04407ac3e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -74,7 +74,6 @@ public class Get extends Query implements Row { private int storeOffset = 0; private TimeRange tr = TimeRange.allTime(); private boolean checkExistenceOnly = false; - private boolean closestRowBefore = false; private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** @@ -199,11 +198,10 @@ public Get addColumn(byte [] family, byte [] qualifier) { * [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive - * @throws IOException * @return this for invocation chaining */ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -214,17 +212,17 @@ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { */ public Get setTimestamp(long timestamp) { try { - tr = new TimeRange(timestamp, timestamp + 1); + tr = TimeRange.at(timestamp); } catch(Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); throw e; } - return this; } - @Override public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { + @Override + public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { return (Get) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index df448eb91b6a..bd824d4a855f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -148,9 +148,8 @@ public TimeRange getTimeRange() { * @throws IOException if invalid time range * @return this */ - public Increment setTimeRange(long minStamp, long maxStamp) - throws IOException { - tr = new TimeRange(minStamp, maxStamp); + public Increment setTimeRange(long minStamp, long maxStamp) throws IOException { + tr = TimeRange.between(minStamp, maxStamp); return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 1d990d1bc942..919513ceb622 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -230,7 +230,7 @@ public boolean doLoadColumnFamiliesOnDemand() { */ public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { - colFamTimeRangeMap.put(cf, new TimeRange(minStamp, maxStamp)); + colFamTimeRangeMap.put(cf, TimeRange.between(minStamp, maxStamp)); return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index d515c550f0e9..36b116bd90af 
100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -316,7 +316,7 @@ public Scan addColumn(byte [] family, byte [] qualifier) { * @return this */ public Scan setTimeRange(long minStamp, long maxStamp) throws IOException { - tr = new TimeRange(minStamp, maxStamp); + tr = TimeRange.between(minStamp, maxStamp); return this; } @@ -350,7 +350,7 @@ public Scan setTimeStamp(long timestamp) */ public Scan setTimestamp(long timestamp) { try { - tr = new TimeRange(timestamp, timestamp + 1); + tr = TimeRange.at(timestamp); } catch(Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index d5fdb89302c5..772183251634 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2861,10 +2861,18 @@ public static List toSecurityCapabilityList( } public static TimeRange toTimeRange(HBaseProtos.TimeRange timeRange) { - return timeRange == null ? - TimeRange.allTime() : - new TimeRange(timeRange.hasFrom() ? timeRange.getFrom() : 0, - timeRange.hasTo() ? timeRange.getTo() : Long.MAX_VALUE); + if (timeRange == null) { + return TimeRange.allTime(); + } + if (timeRange.hasFrom()) { + if (timeRange.hasTo()) { + return TimeRange.between(timeRange.getFrom(), timeRange.getTo()); + } else { + return TimeRange.from(timeRange.getFrom()); + } + } else { + return TimeRange.until(timeRange.getTo()); + } } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index fe229b692109..0dea94801b8a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -18,24 +18,23 @@ package org.apache.hadoop.hbase.io; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** * Represents an interval of version timestamps. Presumes timestamps between * {@link #INITIAL_MIN_TIMESTAMP} and {@link #INITIAL_MAX_TIMESTAMP} only. Gets freaked out if * passed a timestamp that is < {@link #INITIAL_MIN_TIMESTAMP}, - *

    + *

    * Evaluated according to minStamp <= timestamp < maxStamp or [minStamp,maxStamp) in interval * notation. - *

    + *

    * Can be returned and read by clients. Should not be directly created by clients. Thus, all * constructors are purposely @InterfaceAudience.Private. - *

    + *

    * Immutable. Thread-safe. */ @InterfaceAudience.Public -public class TimeRange { +public final class TimeRange { public static final long INITIAL_MIN_TIMESTAMP = 0L; public static final long INITIAL_MAX_TIMESTAMP = Long.MAX_VALUE; private static final TimeRange ALL_TIME = new TimeRange(INITIAL_MIN_TIMESTAMP, @@ -84,67 +83,13 @@ public static TimeRange between(long minStamp, long maxStamp) { private final long maxStamp; private final boolean allTime; - /** - * Default constructor. - * Represents interval [0, Long.MAX_VALUE) (allTime) - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange() { - this(INITIAL_MIN_TIMESTAMP, INITIAL_MAX_TIMESTAMP); - } - - /** - * Represents interval [minStamp, Long.MAX_VALUE) - * @param minStamp the minimum timestamp value, inclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(long minStamp) { - this(minStamp, INITIAL_MAX_TIMESTAMP); - } - - /** - * Represents interval [minStamp, Long.MAX_VALUE) - * @param minStamp the minimum timestamp value, inclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(byte [] minStamp) { - this(Bytes.toLong(minStamp)); - } - - /** - * Represents interval [minStamp, maxStamp) - * @param minStamp the minimum timestamp, inclusive - * @param maxStamp the maximum timestamp, exclusive - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. - */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(byte [] minStamp, byte [] maxStamp) { - this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp)); - } - /** * Represents interval [minStamp, maxStamp) * @param minStamp the minimum timestamp, inclusive * @param maxStamp the maximum timestamp, exclusive * @throws IllegalArgumentException if either <0, - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. */ - @Deprecated - @InterfaceAudience.Private - public TimeRange(long minStamp, long maxStamp) { - check(minStamp, maxStamp); + private TimeRange(long minStamp, long maxStamp) { this.minStamp = minStamp; this.maxStamp = maxStamp; this.allTime = isAllTime(minStamp, maxStamp); @@ -188,27 +133,8 @@ public boolean isAllTime() { /** * Check if the specified timestamp is within this TimeRange. - *

    + *

    * Returns true if within interval [minStamp, maxStamp), false if not. - * @param bytes timestamp to check - * @param offset offset into the bytes - * @return true if within TimeRange, false if not - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above and may be - * changed to private or removed in 3.0. Use {@link #withinTimeRange(long)} instead - */ - @Deprecated - public boolean withinTimeRange(byte [] bytes, int offset) { - if (allTime) { - return true; - } - return withinTimeRange(Bytes.toLong(bytes, offset)); - } - - /** - * Check if the specified timestamp is within this TimeRange. - *

    - * Returns true if within interval [minStamp, maxStamp), false - * if not. * @param timestamp timestamp to check * @return true if within TimeRange, false if not */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index 18175648f305..37923ad4d464 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -240,7 +240,7 @@ TimeRange toTimeRange() { if (max == INITIAL_MAX_TIMESTAMP) { max = TimeRange.INITIAL_MAX_TIMESTAMP; } - return new TimeRange(min, max); + return TimeRange.between(min, max); } @VisibleForTesting diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java index 51f0d7307c02..8dfc774300ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java @@ -128,15 +128,15 @@ public void testHTableInterfaceMethods() throws Exception { time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range10 = new TimeRange(1, time + 10); - Result r = table.append(new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b")) - .setTimeRange(range10.getMin(), range10.getMax())); + TimeRange range10 = TimeRange.between(1, time + 10); + table.append(new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b")) + .setTimeRange(range10.getMin(), range10.getMax())); checkRowValue(table, ROW, Bytes.toBytes("ab")); assertEquals(MyObserver.tr10.getMin(), range10.getMin()); assertEquals(MyObserver.tr10.getMax(), range10.getMax()); time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range2 = new TimeRange(1, time+20); + TimeRange range2 = TimeRange.between(1, time + 20); List actions = Arrays.asList(new Row[] { new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c")) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java index a74914f62135..1e822e4d2164 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java @@ -165,7 +165,7 @@ private void checkHTableInterfaceMethods() throws Exception { time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range10 = new TimeRange(1, time+10); + TimeRange range10 = TimeRange.between(1, time+10); hTableInterface.increment(new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 10L) .setTimeRange(range10.getMin(), range10.getMax())); checkRowValue(ROW_A, Bytes.toBytes(11L)); @@ -174,7 +174,7 @@ private void checkHTableInterfaceMethods() throws Exception { time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range2 = new TimeRange(1, time+20); + TimeRange range2 = TimeRange.between(1, time + 20); List actions = Arrays.asList(new Row[] { new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L) .setTimeRange(range2.getMin(), range2.getMax()), diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java index 2858419f0378..2a3f018562cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java @@ -61,7 +61,7 @@ public void testExtreme() { @Test public void testTimeRangeInitialized() { TimeRangeTracker src = getTimeRangeTracker(); - TimeRange tr = new TimeRange(System.currentTimeMillis()); + TimeRange tr = TimeRange.from(System.currentTimeMillis()); assertFalse(src.includesTimeRange(tr)); } @@ -108,7 +108,7 @@ public void testSimpleInRange() { TimeRangeTracker trr = getTimeRangeTracker(); trr.includeTimestamp(0); trr.includeTimestamp(2); - assertTrue(trr.includesTimeRange(new TimeRange(1))); + assertTrue(trr.includesTimeRange(TimeRange.from(1))); } @Test @@ -118,27 +118,27 @@ public void testRangeConstruction() throws IOException { assertEquals(Long.MAX_VALUE, defaultRange.getMax()); assertTrue(defaultRange.isAllTime()); - TimeRange oneArgRange = new TimeRange(0L); + TimeRange oneArgRange = TimeRange.from(0L); assertEquals(0L, oneArgRange.getMin()); assertEquals(Long.MAX_VALUE, oneArgRange.getMax()); assertTrue(oneArgRange.isAllTime()); - TimeRange oneArgRange2 = new TimeRange(1); + TimeRange oneArgRange2 = TimeRange.from(1); assertEquals(1, oneArgRange2.getMin()); assertEquals(Long.MAX_VALUE, oneArgRange2.getMax()); assertFalse(oneArgRange2.isAllTime()); - TimeRange twoArgRange = new TimeRange(0L, Long.MAX_VALUE); + TimeRange twoArgRange = TimeRange.between(0L, Long.MAX_VALUE); assertEquals(0L, twoArgRange.getMin()); assertEquals(Long.MAX_VALUE, twoArgRange.getMax()); assertTrue(twoArgRange.isAllTime()); - TimeRange twoArgRange2 = new TimeRange(0L, Long.MAX_VALUE - 1); + TimeRange twoArgRange2 = TimeRange.between(0L, Long.MAX_VALUE - 1); assertEquals(0L, twoArgRange2.getMin()); assertEquals(Long.MAX_VALUE - 1, twoArgRange2.getMax()); assertFalse(twoArgRange2.isAllTime()); - TimeRange twoArgRange3 = new TimeRange(1, Long.MAX_VALUE); + TimeRange twoArgRange3 = TimeRange.between(1, Long.MAX_VALUE); assertEquals(1, twoArgRange3.getMin()); assertEquals(Long.MAX_VALUE, twoArgRange3.getMax()); assertFalse(twoArgRange3.isAllTime()); From 603eb8b6e40d1ff2eb2796454ef57b30ab8bc72e Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 4 Nov 2020 17:54:18 +0800 Subject: [PATCH 482/769] HBASE-25216 The client zk syncer should deal with meta replica count change (#2614) Signed-off-by: Yu Li --- .../apache/hadoop/hbase/master/HMaster.java | 8 +- .../hadoop/hbase/master/MasterServices.java | 8 + .../procedure/ModifyTableProcedure.java | 7 + .../hbase/master/zksyncer/ClientZKSyncer.java | 216 +++++++++++++----- .../master/zksyncer/MasterAddressSyncer.java | 13 +- .../master/zksyncer/MetaLocationSyncer.java | 24 +- .../client/TestSeparateClientZKCluster.java | 62 +++-- .../hbase/master/MockNoopMasterServices.java | 6 + .../hbase/master/TestMasterNoCluster.java | 2 +- 9 files changed, 244 insertions(+), 102 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index f9123046eef2..8cb399a476e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ 
-322,8 +322,9 @@ public void run() { // Tracker for load balancer state LoadBalancerTracker loadBalancerTracker; // Tracker for meta location, if any client ZK quorum specified - MetaLocationSyncer metaLocationSyncer; + private MetaLocationSyncer metaLocationSyncer; // Tracker for active master location, if any client ZK quorum specified + @VisibleForTesting MasterAddressSyncer masterAddressSyncer; // Tracker for auto snapshot cleanup state SnapshotCleanupTracker snapshotCleanupTracker; @@ -3852,4 +3853,9 @@ public CompactionState getCompactionState(final TableName tableName) { } return compactionState; } + + @Override + public MetaLocationSyncer getMetaLocationSyncer() { + return metaLocationSyncer; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 384785d738f6..c5f0f3c4bcad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.LockedResource; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -570,4 +571,11 @@ default SplitWALManager getSplitWALManager(){ */ boolean normalizeRegions( final NormalizeTableFilterParams ntfp, final boolean isHighPriority) throws IOException; + + /** + * Get the meta location syncer. + *

+   * We need to get this in ModifyTableProcedure (MTP) to tell the syncer the new meta
+   * replica count.
+   */
+  MetaLocationSyncer getMetaLocationSyncer();
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index beb129b6f52b..247dd9c202f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
@@ -157,6 +158,12 @@ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableS
         break;
       case MODIFY_TABLE_ASSIGN_NEW_REPLICAS:
         assignNewReplicasIfNeeded(env);
+        if (TableName.isMetaTableName(getTableName())) {
+          MetaLocationSyncer syncer = env.getMasterServices().getMetaLocationSyncer();
+          if (syncer != null) {
+            syncer.setMetaReplicaCount(modifiedTableDescriptor.getRegionReplication());
+          }
+        }
         if (deleteColumnFamilyInModify) {
           setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
         } else {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java
index 38dc11218687..51208e37d4e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java
@@ -19,12 +19,11 @@
 package org.apache.hadoop.hbase.master.zksyncer;
 
 import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.util.Threads;
@@ -34,7 +33,6 @@
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,22 +40,68 @@
  * Tracks the target znode(s) on server ZK cluster and synchronizes them to client ZK cluster if
  * changed
 *

    - * The target znode(s) is given through {@link #getNodesToWatch()} method + * The target znode(s) is given through {@link #getPathsToWatch()} method */ @InterfaceAudience.Private public abstract class ClientZKSyncer extends ZKListener { private static final Logger LOG = LoggerFactory.getLogger(ClientZKSyncer.class); private final Server server; private final ZKWatcher clientZkWatcher; + + /** + * Used to store the newest data which we want to sync to client zk. + *

+   * For meta location, since we may reduce the replica number, we add a {@code delete} flag to
+   * tell the updater to delete the znode on client ZK and quit.
+   */
+  private static final class ZKData {
+
+    byte[] data;
+
+    boolean delete = false;
+
+    synchronized void set(byte[] data) {
+      this.data = data;
+      notifyAll();
+    }
+
+    synchronized byte[] get() throws InterruptedException {
+      while (!delete && data == null) {
+        wait();
+      }
+      byte[] d = data;
+      data = null;
+      return d;
+    }
+
+    synchronized void delete() {
+      this.delete = true;
+      notifyAll();
+    }
+
+    synchronized boolean isDeleted() {
+      return delete;
+    }
+  }
+
   // We use queues and daemon threads to synchronize the data to client ZK cluster
   // to avoid blocking the single event thread for watchers
-  private final Map<String, BlockingQueue<byte[]>> queues;
+  private final ConcurrentMap<String, ZKData> queues;
 
   public ClientZKSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server server) {
     super(watcher);
     this.server = server;
     this.clientZkWatcher = clientZkWatcher;
-    this.queues = new HashMap<>();
+    this.queues = new ConcurrentHashMap<>();
+  }
+
+  private void startNewSyncThread(String path) {
+    ZKData zkData = new ZKData();
+    queues.put(path, zkData);
+    Thread updater = new ClientZkUpdater(path, zkData);
+    updater.setDaemon(true);
+    updater.start();
+    watchAndCheckExists(path);
   }
 
   /**
@@ -69,17 +113,12 @@ public void start() throws KeeperException {
     this.watcher.registerListener(this);
     // create base znode on remote ZK
     ZKUtil.createWithParents(clientZkWatcher, watcher.getZNodePaths().baseZNode);
-    // set meta znodes for client ZK
-    Collection<String> nodes = getNodesToWatch();
-    LOG.debug("Znodes to watch: " + nodes);
+    // set znodes for client ZK
+    Set<String> paths = getPathsToWatch();
+    LOG.debug("ZNodes to watch: {}", paths);
     // initialize queues and threads
-    for (String node : nodes) {
-      BlockingQueue<byte[]> queue = new ArrayBlockingQueue<>(1);
-      queues.put(node, queue);
-      Thread updater = new ClientZkUpdater(node, queue);
-      updater.setDaemon(true);
-      updater.start();
-      watchAndCheckExists(node);
+    for (String path : paths) {
+      startNewSyncThread(path);
     }
   }
 
@@ -112,10 +151,9 @@ private void watchAndCheckExists(String node) {
    * @param data the data to write to queue
    */
   private void upsertQueue(String node, byte[] data) {
-    BlockingQueue<byte[]> queue = queues.get(node);
-    synchronized (queue) {
-      queue.poll();
-      queue.offer(data);
+    ZKData zkData = queues.get(node);
+    if (zkData != null) {
+      zkData.set(data);
     }
   }
 
@@ -126,35 +164,49 @@ private void upsertQueue(String node, byte[] data) {
    * @param data the data to set to client ZK
    * @throws InterruptedException if the thread is interrupted during process
    */
-  private final void setDataForClientZkUntilSuccess(String node, byte[] data)
-      throws InterruptedException {
+  private void setDataForClientZkUntilSuccess(String node, byte[] data)
+      throws InterruptedException {
+    boolean create = false;
     while (!server.isStopped()) {
       try {
         LOG.debug("Set data for remote " + node + ", client zk watcher: " + clientZkWatcher);
-        ZKUtil.setData(clientZkWatcher, node, data);
-        break;
-      } catch (KeeperException.NoNodeException nne) {
-        // Node doesn't exist, create it and set value
-        try {
+        if (create) {
           ZKUtil.createNodeIfNotExistsNoWatch(clientZkWatcher, node, data, CreateMode.PERSISTENT);
-          break;
-        } catch (KeeperException.ConnectionLossException
-            | KeeperException.SessionExpiredException ee) {
-          reconnectAfterExpiration();
-        } catch (KeeperException e) {
-          LOG.warn(
-            "Failed to create znode " + node + " due to: " + e.getMessage() + ", will retry later");
+        } else {
+          ZKUtil.setData(clientZkWatcher, node, data);
         }
-      } catch (KeeperException.ConnectionLossException
-          | KeeperException.SessionExpiredException ee) {
-        reconnectAfterExpiration();
+        break;
       } catch (KeeperException e) {
-        LOG.debug("Failed to set data to client ZK, will retry later", e);
+        LOG.debug("Failed to set data for {} to client ZK, will retry later", node, e);
+        if (e.code() == KeeperException.Code.SESSIONEXPIRED) {
+          reconnectAfterExpiration();
+        }
+        if (e.code() == KeeperException.Code.NONODE) {
+          create = true;
+        }
+        if (e.code() == KeeperException.Code.NODEEXISTS) {
+          create = false;
+        }
       }
       Threads.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
     }
   }
 
+  private void deleteDataForClientZkUntilSuccess(String node) throws InterruptedException {
+    while (!server.isStopped()) {
+      LOG.debug("Delete remote " + node + ", client zk watcher: " + clientZkWatcher);
+      try {
+        ZKUtil.deleteNode(clientZkWatcher, node);
+        // Done; without this break the loop would spin forever once the node is gone.
+        break;
+      } catch (KeeperException e) {
+        LOG.debug("Failed to delete node from client ZK, will retry later", e);
+        if (e.code() == KeeperException.Code.SESSIONEXPIRED) {
+          reconnectAfterExpiration();
+        }
+        if (e.code() == KeeperException.Code.NONODE) {
+          // Already deleted, nothing more to do.
+          break;
+        }
+      }
+    }
+  }
+
   private final void reconnectAfterExpiration() throws InterruptedException {
     LOG.warn("ZK session expired or lost. Retry a new connection...");
     try {
@@ -164,11 +216,7 @@ private final void reconnectAfterExpiration() throws InterruptedException {
     }
   }
 
-  @Override
-  public void nodeCreated(String path) {
-    if (!validate(path)) {
-      return;
-    }
+  private void getDataAndWatch(String path) {
     try {
       byte[] data = ZKUtil.getDataAndWatch(watcher, path);
       upsertQueue(path, data);
@@ -177,23 +225,39 @@ public void nodeCreated(String path) {
     }
   }
 
+  private void removeQueue(String path) {
+    ZKData zkData = queues.remove(path);
+    if (zkData != null) {
+      zkData.delete();
+    }
+  }
+
   @Override
-  public void nodeDataChanged(String path) {
+  public void nodeCreated(String path) {
     if (validate(path)) {
-      nodeCreated(path);
+      getDataAndWatch(path);
+    } else {
+      removeQueue(path);
     }
   }
 
+  @Override
+  public void nodeDataChanged(String path) {
+    nodeCreated(path);
+  }
+
   @Override
   public synchronized void nodeDeleted(String path) {
     if (validate(path)) {
       try {
         if (ZKUtil.watchAndCheckExists(watcher, path)) {
-          nodeCreated(path);
+          getDataAndWatch(path);
         }
       } catch (KeeperException e) {
         LOG.warn("Unexpected exception handling nodeDeleted event for path: " + path, e);
       }
+    } else {
+      removeQueue(path);
     }
   }
 
@@ -202,41 +266,67 @@ public synchronized void nodeDeleted(String path) {
    * @param path the path to validate
    * @return true if the znode is watched by us
    */
-  abstract boolean validate(String path);
+  protected abstract boolean validate(String path);
 
   /**
-   * @return the znode(s) to watch
+   * @return the zk path(s) to watch
   */
-  abstract Collection<String> getNodesToWatch() throws KeeperException;
+  protected abstract Set<String> getPathsToWatch();
+
+  protected final void refreshWatchingList() {
+    Set<String> newPaths = getPathsToWatch();
+    LOG.debug("New ZNodes to watch: {}", newPaths);
+    Iterator<Map.Entry<String, ZKData>> iter = queues.entrySet().iterator();
+    // stop unused syncers
+    while (iter.hasNext()) {
+      Map.Entry<String, ZKData> entry = iter.next();
+      if (!newPaths.contains(entry.getKey())) {
+        iter.remove();
+        entry.getValue().delete();
+      }
+    }
+    // start new syncers
+    for (String newPath : newPaths) {
+      if (!queues.containsKey(newPath)) {
+        startNewSyncThread(newPath);
+      }
+    }
+  }
 
   /**
    * Thread to synchronize znode data to client ZK cluster
    */
-  class ClientZkUpdater extends Thread {
-    final String znode;
-    final BlockingQueue<byte[]> queue;
+  private final class ClientZkUpdater extends Thread {
+    private final String znode;
+    private final ZKData zkData;
 
-    public ClientZkUpdater(String znode, BlockingQueue<byte[]> queue) {
+    public ClientZkUpdater(String znode, ZKData zkData) {
      this.znode = znode;
-      this.queue = queue;
+      this.zkData = zkData;
      setName("ClientZKUpdater-" + znode);
    }
 
    @Override
    public void run() {
+      LOG.debug("Client zk updater for znode {} started", znode);
      while (!server.isStopped()) {
        try {
-          byte[] data = queue.take();
-          setDataForClientZkUntilSuccess(znode, data);
-        } catch (InterruptedException e) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-              "Interrupted while checking whether need to update meta location to client zk");
+          byte[] data = zkData.get();
+          if (data != null) {
+            setDataForClientZkUntilSuccess(znode, data);
+          } else {
+            if (zkData.isDeleted()) {
+              deleteDataForClientZkUntilSuccess(znode);
+              break;
+            }
          }
+        } catch (InterruptedException e) {
+          LOG.debug("Interrupted while checking whether need to update meta location to client zk");
          Thread.currentThread().interrupt();
          break;
        }
      }
+      LOG.debug("Client zk updater for znode {} stopped", znode);
    }
  }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java
index a9aa13cb93d3..ee04238d0b95 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.java
@@ -18,9 +18,8 @@
 */
 package org.apache.hadoop.hbase.master.zksyncer;
 
-import java.util.ArrayList;
-import java.util.Collection;
-
+import java.util.Collections;
+import java.util.Set;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -39,14 +38,12 @@ public MasterAddressSyncer(ZKWatcher watcher, ZKWatcher clientZkWatcher, Server
 }
 
 @Override
-  boolean validate(String path) {
+  protected boolean validate(String path) {
    return path.equals(masterAddressZNode);
  }
 
  @Override
-  Collection<String> getNodesToWatch() {
-    ArrayList<String> toReturn = new ArrayList<>();
-    toReturn.add(masterAddressZNode);
-    return toReturn;
+  protected Set<String> getPathsToWatch() {
+    return Collections.singleton(masterAddressZNode);
  }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java
index dca5cadf8adf..f6e38329ac34 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java
@@ -18,13 +18,12 @@
 */
 package org.apache.hadoop.hbase.master.zksyncer;
 
-import java.util.Collection;
+import java.util.Set;
 import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * Tracks the meta region locations on server ZK cluster and synchronizes them to client ZK cluster
@@ -32,19 +31,28 @@
 */
@InterfaceAudience.Private
public class MetaLocationSyncer extends ClientZKSyncer {
+
+  private volatile int metaReplicaCount = 1;
+
  public MetaLocationSyncer(ZKWatcher watcher, ZKWatcher
clientZkWatcher, Server server) { super(watcher, clientZkWatcher, server); } @Override - boolean validate(String path) { + protected boolean validate(String path) { return watcher.getZNodePaths().isMetaZNodePath(path); } @Override - Collection getNodesToWatch() throws KeeperException { - return watcher.getMetaReplicaNodes().stream() - .map(znode -> ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode)) - .collect(Collectors.toList()); + protected Set getPathsToWatch() { + return IntStream.range(0, metaReplicaCount) + .mapToObj(watcher.getZNodePaths()::getZNodeForReplica).collect(Collectors.toSet()); + } + + public void setMetaReplicaCount(int replicaCount) { + if (replicaCount != metaReplicaCount) { + metaReplicaCount = replicaCount; + refreshWatchingList(); + } } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 4665e8417de7..7fc955234557 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -26,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartMiniClusterOption; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -35,13 +40,11 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,11 +64,11 @@ public class TestSeparateClientZKCluster { private final byte[] newVal = Bytes.toBytes("v2"); @Rule - public TestName name = new TestName(); + public TableNameTestRule name = new TableNameTestRule(); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSeparateClientZKCluster.class); + HBaseClassTestRule.forClass(TestSeparateClientZKCluster.class); @BeforeClass public static void beforeAllTests() throws Exception { @@ -78,13 +81,15 @@ public static void beforeAllTests() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.client.start.log.errors.counter", -1); TEST_UTIL.getConfiguration().setInt("zookeeper.recovery.retry", 1); // core settings for testing client ZK cluster + TEST_UTIL.getConfiguration().setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + ZKConnectionRegistry.class, ConnectionRegistry.class); TEST_UTIL.getConfiguration().set(HConstants.CLIENT_ZOOKEEPER_QUORUM, HConstants.LOCALHOST); TEST_UTIL.getConfiguration().setInt(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT, clientZkPort); // reduce zk session timeout to easier trigger session 
expiration TEST_UTIL.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, ZK_SESSION_TIMEOUT); // Start a cluster with 2 masters and 3 regionservers. - StartMiniClusterOption option = StartMiniClusterOption.builder() - .numMasters(2).numRegionServers(3).numDataNodes(3).build(); + StartMiniClusterOption option = + StartMiniClusterOption.builder().numMasters(2).numRegionServers(3).numDataNodes(3).build(); TEST_UTIL.startMiniCluster(option); } @@ -97,7 +102,7 @@ public static void afterAllTests() throws Exception { @Test public void testBasicOperation() throws Exception { - TableName tn = TableName.valueOf(name.getMethodName()); + TableName tn = name.getTableName(); // create table Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) { @@ -113,7 +118,7 @@ public void testBasicOperation() throws Exception { Get get = new Get(row); Result result = table.get(get); LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier))); - Assert.assertArrayEquals(value, result.getValue(family, qualifier)); + assertArrayEquals(value, result.getValue(family, qualifier)); } } @@ -133,24 +138,24 @@ public void testMasterSwitch() throws Exception { } LOG.info("Shutdown master {}", master.getServerName()); while (cluster.getMaster() == null || !cluster.getMaster().isInitialized()) { - LOG.info("Get master {}", cluster.getMaster() == null? "null": - cluster.getMaster().getServerName()); + LOG.info("Get master {}", + cluster.getMaster() == null ? "null" : cluster.getMaster().getServerName()); Thread.sleep(200); } LOG.info("Got master {}", cluster.getMaster().getServerName()); // confirm client access still works - Assert.assertTrue(admin.balance(false)); + assertTrue(admin.balance(false)); } } @Test public void testMetaRegionMove() throws Exception { - TableName tn = TableName.valueOf(name.getMethodName()); + TableName tn = name.getTableName(); // create table Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); - Table table = conn.getTable(tn); - RegionLocator locator = conn.getRegionLocator(tn)) { + Table table = conn.getTable(tn); + RegionLocator locator = conn.getRegionLocator(tn)) { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); ColumnFamilyDescriptorBuilder cfDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); @@ -191,13 +196,13 @@ public void testMetaRegionMove() throws Exception { table.put(put); result = table.get(get); LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier))); - Assert.assertArrayEquals(newVal, result.getValue(family, qualifier)); + assertArrayEquals(newVal, result.getValue(family, qualifier)); } } @Test public void testMetaMoveDuringClientZkClusterRestart() throws Exception { - TableName tn = TableName.valueOf(name.getMethodName()); + TableName tn = name.getTableName(); // create table Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) { @@ -233,18 +238,18 @@ public void testMetaMoveDuringClientZkClusterRestart() throws Exception { Get get = new Get(row); Result result = table.get(get); LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier))); - Assert.assertArrayEquals(value, result.getValue(family, qualifier)); + assertArrayEquals(value, result.getValue(family, qualifier)); } } @Test public void testAsyncTable() throws Exception { - TableName tn = TableName.valueOf(name.getMethodName()); + TableName tn = name.getTableName(); ColumnFamilyDescriptorBuilder 
cfDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); TableDescriptorBuilder tableDescBuilder = - TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); + TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); try (AsyncConnection ASYNC_CONN = - ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { ASYNC_CONN.getAdmin().createTable(tableDescBuilder.build()).get(); AsyncTable table = ASYNC_CONN.getTable(tn); // put some data @@ -255,7 +260,22 @@ public void testAsyncTable() throws Exception { Get get = new Get(row); Result result = table.get(get).get(); LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier))); - Assert.assertArrayEquals(value, result.getValue(family, qualifier)); + assertArrayEquals(value, result.getValue(family, qualifier)); + } + } + + @Test + public void testChangeMetaReplicaCount() throws Exception { + Admin admin = TEST_UTIL.getAdmin(); + try (RegionLocator locator = + TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + assertEquals(1, locator.getAllRegionLocations().size()); + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 3); + TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 3); + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 2); + TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 2); + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 1); + TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 1); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 3f3e80960bb9..933addfbf600 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.LockedResource; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -514,4 +515,9 @@ public boolean isBalancerOn() { public boolean normalizeRegions(NormalizeTableFilterParams ntfp, boolean isHighPriority) { return false; } + + @Override + public MetaLocationSyncer getMetaLocationSyncer() { + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index 5979f4845a93..bdeab3d28a25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -160,7 +160,7 @@ public void testMasterInitWithObserverModeClientZKQuorum() throws Exception { while (!master.isInitialized()) { Threads.sleep(200); } - Assert.assertNull(master.metaLocationSyncer); + Assert.assertNull(master.getMetaLocationSyncer()); Assert.assertNull(master.masterAddressSyncer); master.stopMaster(); 
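// An illustrative aside, not part of the patch: getMetaLocationSyncer() is the accessor this
// series adds to MasterServices; as the assertions above show, it can legitimately be null
// (e.g. in this observer-mode client ZK setup), so callers are expected to null-check it. A
// minimal hedged sketch of the intended use when the meta replica count changes
// (newReplicaCount is a hypothetical variable):
//   MetaLocationSyncer syncer = master.getMetaLocationSyncer();
//   if (syncer != null) {
//     // per the MetaLocationSyncer diff above, this refreshes the set of watched meta znodes
//     syncer.setMetaReplicaCount(newReplicaCount);
//   }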
master.join(); From 530a99de7b28759830bd63f8ca7ff7e1ee88bfd3 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 4 Nov 2020 16:05:42 +0530 Subject: [PATCH 483/769] HBASE-25245 : Fixing incorrect maven and jdk names for generate-hbase-website Closes #2624 Signed-off-by: Duo Zhang --- .../jenkins-scripts/generate-hbase-website.Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile b/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile index 76b7d3d4140f..7e8ec44a4e6a 100644 --- a/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile +++ b/dev-support/jenkins-scripts/generate-hbase-website.Jenkinsfile @@ -37,9 +37,9 @@ pipeline { stages { stage ('generate hbase website') { tools { - maven 'Maven (latest)' + maven 'maven_latest' // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in. - jdk "JDK 1.8 (latest)" + jdk "jdk_1.8_latest" } steps { dir('hbase') { From c8d7ca9491e555bc5b9f5359c7db839da2f1355b Mon Sep 17 00:00:00 2001 From: niuyulin Date: Thu, 5 Nov 2020 00:21:31 +0800 Subject: [PATCH 484/769] HBASE-25053 WAL replay should ignore 0-length files (#2437) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/regionserver/HRegion.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index bca18dbcb013..d0e628432b13 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -5372,6 +5372,11 @@ long replayRecoveredEditsIfAny(Map maxSeqIdInStores, recoveredEditsDir); if (files != null) { for (FileStatus file : files) { + // it is safe to trust the zero-length in this case because we've been through rename and + // lease recovery in the above. + if (isZeroLengthThenDelete(fs, file, file.getPath())) { + continue; + } seqId = Math.max(seqId, replayRecoveredEdits(file.getPath(), maxSeqIdInStores, reporter, fs)); } @@ -6536,6 +6541,8 @@ protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreAccountin } /** + * make sure have been through lease recovery before get file status, so the file length can be + * trusted. * @param p File to check. * @return True if file was zero-length (and if so, we'll delete it in here). 
* @throws IOException From ea2ae43299a0d49ea01dca420f9ba31ecb5f668f Mon Sep 17 00:00:00 2001 From: WenFeiYi Date: Thu, 5 Nov 2020 19:55:08 +0530 Subject: [PATCH 485/769] HBASE-25240 gson format of RpcServer.logResponse is abnormal Closes #2623 Signed-off-by: Viraj Jasani --- .../apache/hadoop/hbase/util/GsonUtil.java | 4 ++ .../hadoop/hbase/util/TestGsonUtil.java | 48 +++++++++++++++++++ .../apache/hadoop/hbase/ipc/RpcServer.java | 2 +- 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestGsonUtil.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java index 80be4af72f13..59c2d80f4d18 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java @@ -58,4 +58,8 @@ public LongAdder read(JsonReader in) throws IOException { } }); } + + public static GsonBuilder createGsonWithDisableHtmlEscaping() { + return createGson().disableHtmlEscaping(); + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestGsonUtil.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestGsonUtil.java new file mode 100644 index 000000000000..fbfc0b952dae --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestGsonUtil.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MiscTests.class, SmallTests.class }) +public class TestGsonUtil { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestGsonUtil.class); + + private static final Gson GSON = GsonUtil.createGson().create(); + private static final Gson DHE_GSON = GsonUtil.createGsonWithDisableHtmlEscaping().create(); + + @Test + public void testDisableHtmlEscaping() { + // enable html escaping, turn '=' into '\u003d' + assertEquals("\"\\u003d\\u003d\\u003d\"", GSON.toJson("===")); + + // disable html escaping + assertEquals("\"===\"", DHE_GSON.toJson("===")); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index cace5f0240f4..7bae06f601bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -200,7 +200,7 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String TRACE_LOG_MAX_LENGTH = "hbase.ipc.trace.log.max.length"; protected static final String KEY_WORD_TRUNCATED = " "; - protected static final Gson GSON = GsonUtil.createGson().create(); + protected static final Gson GSON = GsonUtil.createGsonWithDisableHtmlEscaping().create(); protected final int maxRequestSize; protected final int warnResponseTime; From 3ac1a5e9685767ab5ca4928501642cadd52c8587 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Thu, 5 Nov 2020 08:36:55 -0800 Subject: [PATCH 486/769] =?UTF-8?q?HBASE-25238=20Upgrading=20HBase=20from?= =?UTF-8?q?=202.2.0=20to=202.3.x=20fails=20because=20of=20=E2=80=9CMessage?= =?UTF-8?q?=20missing=20required=20fields:=20state=E2=80=9D=20(#2625)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make protobuf fields add post-2.0.0 release marked 'required' instead be 'optional' so migrations from 2.0.x to 2.1+ or 2.2+ succeeds. Signed-off-by: Viraj Jasani vjasani@apache.org --- .../apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 4 ++-- .../src/main/protobuf/server/ClusterStatus.proto | 7 +++++-- .../src/main/protobuf/server/master/MasterProcedure.proto | 4 +++- .../hbase/master/assignment/RegionRemoteProcedureBase.java | 5 ++++- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 772183251634..f425984a95f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2797,8 +2797,8 @@ public static ReplicationLoadSink toReplicationLoadSink( ClusterStatusProtos.ReplicationLoadSink rls) { return new ReplicationLoadSink(rls.getAgeOfLastAppliedOp(), rls.getTimeStampsOfLastAppliedOp(), - rls.getTimestampStarted(), - rls.getTotalOpsProcessed()); + rls.hasTimestampStarted()? 
rls.getTimestampStarted(): -1L, + rls.hasTotalOpsProcessed()? rls.getTotalOpsProcessed(): -1L); } public static ReplicationLoadSource toReplicationLoadSource( diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto index 35f3c2d054b5..dc875daf7976 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto @@ -207,8 +207,11 @@ message ClientMetrics { message ReplicationLoadSink { required uint64 ageOfLastAppliedOp = 1; required uint64 timeStampsOfLastAppliedOp = 2; - required uint64 timestampStarted = 3; - required uint64 totalOpsProcessed = 4; + // The below two were added after hbase-2.0.0 went out. They have to be added as 'optional' else + // we break upgrades; old RegionServers reporting in w/ old forms of this message will fail to + // deserialize on the new Master. See HBASE-25234 + optional uint64 timestampStarted = 3; + optional uint64 totalOpsProcessed = 4; } message ReplicationLoadSource { diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 8d8b9af009cd..76b085d43c8e 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -573,7 +573,9 @@ enum RegionRemoteProcedureBaseState { message RegionRemoteProcedureBaseStateData { required RegionInfo region = 1; required ServerName target_server = 2; - required RegionRemoteProcedureBaseState state = 3; + // state is actually 'required' but we can't set it as 'required' here else it breaks old + // Messages; see HBASE-22074. + optional RegionRemoteProcedureBaseState state = 3; optional RegionStateTransition.TransitionCode transition_code = 4; optional int64 seq_id = 5; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index 1c90d81ed06f..805b51caebec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -352,7 +352,10 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws serializer.deserialize(RegionRemoteProcedureBaseStateData.class); region = ProtobufUtil.toRegionInfo(data.getRegion()); targetServer = ProtobufUtil.toServerName(data.getTargetServer()); - state = data.getState(); + // 'state' may not be present if we are reading an 'old' form of this pb Message. 
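// An illustrative aside, not part of the patch: the hasState() guard added below works because
// of the proto2 rule this fix leans on. A serialized message missing a 'required' field fails
// to parse at all ("Message missing required fields: state"), while a missing 'optional' field
// parses fine and simply reports hasState() == false. Sketch of the schema shape (abbreviated
// from the .proto change above):
//   message RegionRemoteProcedureBaseStateData {
//     optional RegionRemoteProcedureBaseState state = 3; // old writers may omit this
//   }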
+ if (data.hasState()) { + state = data.getState(); + } if (data.hasTransitionCode()) { transitionCode = data.getTransitionCode(); seqId = data.getSeqId(); From 61109ea813a04e5e884f32455c3a4eef07647845 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 7 Nov 2020 20:05:04 +0800 Subject: [PATCH 487/769] HBASE-25252 Move HMaster inner classes out (#2628) Signed-off-by: Viraj Jasani Signed-off-by: Wellington Chevreuil Signed-off-by: Guanghao Zhang --- .../apache/hadoop/hbase/master/HMaster.java | 125 ++---------------- .../master/MasterInitializationMonitor.java | 80 +++++++++++ .../hbase/master/MasterRedirectServlet.java | 81 ++++++++++++ 3 files changed, 174 insertions(+), 112 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 8cb399a476e5..573838f58709 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -50,10 +50,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; import java.util.stream.Collectors; -import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -95,7 +92,6 @@ import org.apache.hadoop.hbase.exceptions.MasterStoppedException; import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; @@ -238,76 +234,23 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** - * HMaster is the "master server" for HBase. An HBase cluster has one active - * master. If many masters are started, all compete. Whichever wins goes on to - * run the cluster. All others park themselves in their constructor until - * master or cluster shutdown or until the active master loses its lease in - * zookeeper. Thereafter, all running master jostle to take over master role. - * - *
<p>The Master can be asked shutdown the cluster. See {@link #shutdown()}. In - * this case it will tell all regionservers to go down and then wait on them - * all reporting in that they are down. This master will then shut itself down. - * - *
<p>You can also shutdown just this master. Call {@link #stopMaster()}. - * + * HMaster is the "master server" for HBase. An HBase cluster has one active master. If many masters + * are started, all compete. Whichever wins goes on to run the cluster. All others park themselves + * in their constructor until master or cluster shutdown or until the active master loses its lease + * in zookeeper. Thereafter, all running master jostle to take over master role. + * <p/>
+ * The Master can be asked shutdown the cluster. See {@link #shutdown()}. In this case it will tell + * all regionservers to go down and then wait on them all reporting in that they are down. This + * master will then shut itself down. + * <p/>
    + * You can also shutdown just this master. Call {@link #stopMaster()}. * @see org.apache.zookeeper.Watcher */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @SuppressWarnings("deprecation") public class HMaster extends HRegionServer implements MasterServices { - private static Logger LOG = LoggerFactory.getLogger(HMaster.class); - - /** - * Protection against zombie master. Started once Master accepts active responsibility and - * starts taking over responsibilities. Allows a finite time window before giving up ownership. - */ - private static class InitializationMonitor extends Thread { - /** The amount of time in milliseconds to sleep before checking initialization status. */ - public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout"; - public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES); - - /** - * When timeout expired and initialization has not complete, call {@link System#exit(int)} when - * true, do nothing otherwise. - */ - public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout"; - public static final boolean HALT_DEFAULT = false; - private final HMaster master; - private final long timeout; - private final boolean haltOnTimeout; - - /** Creates a Thread that monitors the {@link #isInitialized()} state. */ - InitializationMonitor(HMaster master) { - super("MasterInitializationMonitor"); - this.master = master; - this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT); - this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT); - this.setDaemon(true); - } - - @Override - public void run() { - try { - while (!master.isStopped() && master.isActiveMaster()) { - Thread.sleep(timeout); - if (master.isInitialized()) { - LOG.debug("Initialization completed within allotted tolerance. Monitor exiting."); - } else { - LOG.error("Master failed to complete initialization after " + timeout + "ms. Please" - + " consider submitting a bug report including a thread dump of this process."); - if (haltOnTimeout) { - LOG.error("Zombie Master exiting. Thread dump to stdout"); - Threads.printThreadInfo(System.out, "Zombie HMaster"); - System.exit(-1); - } - } - } - } catch (InterruptedException ie) { - LOG.trace("InitMonitor thread interrupted. Existing."); - } - } - } + private static final Logger LOG = LoggerFactory.getLogger(HMaster.class); // MASTER is name of the webapp and the attribute name used stuffing this //instance into web context. @@ -464,48 +407,6 @@ public void run() { // Cached clusterId on stand by masters to serve clusterID requests from clients. private final CachedClusterId cachedClusterId; - public static class RedirectServlet extends HttpServlet { - private static final long serialVersionUID = 2894774810058302473L; - private final int regionServerInfoPort; - private final String regionServerHostname; - - /** - * @param infoServer that we're trying to send all requests to - * @param hostname may be null. if given, will be used for redirects instead of host from client. 
- */ - public RedirectServlet(InfoServer infoServer, String hostname) { - regionServerInfoPort = infoServer.getPort(); - regionServerHostname = hostname; - } - - @Override - public void doGet(HttpServletRequest request, - HttpServletResponse response) throws ServletException, IOException { - String redirectHost = regionServerHostname; - if(redirectHost == null) { - redirectHost = request.getServerName(); - if(!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) { - LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" + - MASTER_HOSTNAME_KEY + "' is not set; client will get an HTTP 400 response. If " + - "your HBase deployment relies on client accessible names that the region server process " + - "can't resolve locally, then you should set the previously mentioned configuration variable " + - "to an appropriate hostname."); - // no sending client provided input back to the client, so the goal host is just in the logs. - response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " + - "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " + - "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname."); - return; - } - } - // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the - // host and port we're using, but it's buried way too deep to do that ATM. - String redirectUrl = request.getScheme() + "://" - + redirectHost + ":" + regionServerInfoPort - + request.getRequestURI(); - response.sendRedirect(redirectUrl); - } - } - /** * Initializes the HMaster. The steps are as follows: *
<p>
    @@ -678,7 +579,7 @@ private int putUpJettyServer() throws IOException { final String redirectHostname = StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead; - final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname); + final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname); final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); context.addServlet(new ServletHolder(redirect), "/*"); context.setServer(masterJettyServer); @@ -998,7 +899,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc this.activeMaster = true; // Start the Zombie master detector after setting master as active, see HBASE-21535 - Thread zombieDetector = new Thread(new InitializationMonitor(this), + Thread zombieDetector = new Thread(new MasterInitializationMonitor(this), "ActiveMasterInitializationMonitor-" + System.currentTimeMillis()); zombieDetector.setDaemon(true); zombieDetector.start(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java new file mode 100644 index 000000000000..dcfeeab41309 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterInitializationMonitor.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Protection against zombie master. Started once Master accepts active responsibility and starts + * taking over responsibilities. Allows a finite time window before giving up ownership. + */ +@InterfaceAudience.Private +class MasterInitializationMonitor extends Thread { + + private static final Logger LOG = LoggerFactory.getLogger(MasterInitializationMonitor.class); + + /** The amount of time in milliseconds to sleep before checking initialization status. */ + public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout"; + public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES); + + /** + * When timeout expired and initialization has not complete, call {@link System#exit(int)} when + * true, do nothing otherwise. 
+ */ + public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout"; + public static final boolean HALT_DEFAULT = false; + + private final HMaster master; + private final long timeout; + private final boolean haltOnTimeout; + + /** Creates a Thread that monitors the {@link #isInitialized()} state. */ + MasterInitializationMonitor(HMaster master) { + super("MasterInitializationMonitor"); + this.master = master; + this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT); + this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT); + this.setDaemon(true); + } + + @Override + public void run() { + try { + while (!master.isStopped() && master.isActiveMaster()) { + Thread.sleep(timeout); + if (master.isInitialized()) { + LOG.debug("Initialization completed within allotted tolerance. Monitor exiting."); + } else { + LOG.error("Master failed to complete initialization after " + timeout + "ms. Please" + + " consider submitting a bug report including a thread dump of this process."); + if (haltOnTimeout) { + LOG.error("Zombie Master exiting. Thread dump to stdout"); + Threads.printThreadInfo(System.out, "Zombie HMaster"); + System.exit(-1); + } + } + } + } catch (InterruptedException ie) { + LOG.trace("InitMonitor thread interrupted. Existing."); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java new file mode 100644 index 000000000000..bda2934dbfb2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRedirectServlet.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY; + +import java.io.IOException; +import java.net.InetAddress; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.hbase.http.InfoServer; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +class MasterRedirectServlet extends HttpServlet { + + private static final long serialVersionUID = 2894774810058302473L; + + private static final Logger LOG = LoggerFactory.getLogger(MasterRedirectServlet.class); + + private final int regionServerInfoPort; + private final String regionServerHostname; + + /** + * @param infoServer that we're trying to send all requests to + * @param hostname may be null. 
if given, will be used for redirects instead of host from client. + */ + public MasterRedirectServlet(InfoServer infoServer, String hostname) { + regionServerInfoPort = infoServer.getPort(); + regionServerHostname = hostname; + } + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + String redirectHost = regionServerHostname; + if (redirectHost == null) { + redirectHost = request.getServerName(); + if (!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) { + LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" + + MASTER_HOSTNAME_KEY + "' is not set; client will get an HTTP 400 response. If " + + "your HBase deployment relies on client accessible names that the region server " + + "process can't resolve locally, then you should set the previously mentioned " + + "configuration variable to an appropriate hostname."); + // no sending client provided input back to the client, so the goal host is just in the + // logs. + response.sendError(400, + "Request was to a host that I can't resolve for any of the network interfaces on " + + "this node. If this is due to an intermediary such as an HTTP load balancer or " + + "other proxy, your HBase administrator can set '" + MASTER_HOSTNAME_KEY + + "' to point to the correct hostname."); + return; + } + } + // TODO: this scheme should come from looking at the scheme registered in the infoserver's http + // server for the host and port we're using, but it's buried way too deep to do that ATM. + String redirectUrl = request.getScheme() + "://" + redirectHost + ":" + regionServerInfoPort + + request.getRequestURI(); + response.sendRedirect(redirectUrl); + } +} \ No newline at end of file From 1c45760fe4ca33ee7587aca5fe5fc9c26e2b1b27 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 8 Nov 2020 21:47:18 +0800 Subject: [PATCH 488/769] HBASE-25254 Rewrite TestMultiLogThreshold to remove the LogDelegate in RSRpcServices (#2631) Signed-off-by: Guanghao Zhang --- .../hbase/regionserver/RSRpcServices.java | 42 ++---- .../regionserver/TestMultiLogThreshold.java | 121 +++++++++++------- 2 files changed, 83 insertions(+), 80 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index e15e8e9c1753..ec280b8b01c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -107,8 +107,8 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.namequeues.NamedQueuePayload; -import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; import org.apache.hadoop.hbase.net.Address; @@ -140,7 +140,6 @@ import org.apache.hadoop.hbase.security.access.NoopAccessChecker; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DNS; 
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -248,6 +247,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; @@ -1112,34 +1112,9 @@ private void closeAllScanners() { } } - // Exposed for testing - interface LogDelegate { - void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold); - } - - private static LogDelegate DEFAULT_LOG_DELEGATE = new LogDelegate() { - @Override - public void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold) { - if (LOG.isWarnEnabled()) { - LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold - + ") (HBASE-18023)." + " Requested Number of Rows: " + sum + " Client: " - + RpcServer.getRequestUserName().orElse(null) + "/" - + RpcServer.getRemoteAddress().orElse(null) - + " first region in multi=" + firstRegionName); - } - } - }; - - private final LogDelegate ld; - - public RSRpcServices(final HRegionServer rs) throws IOException { - this(rs, DEFAULT_LOG_DELEGATE); - } - // Directly invoked only for testing - RSRpcServices(final HRegionServer rs, final LogDelegate ld) throws IOException { + public RSRpcServices(final HRegionServer rs) throws IOException { final Configuration conf = rs.getConfiguration(); - this.ld = ld; regionServer = rs; rowSizeWarnThreshold = conf.getInt( HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); @@ -2627,12 +2602,15 @@ private void checkBatchSizeAndLogLargeSize(MultiRequest request) throws ServiceE sum += regionAction.getActionCount(); } if (sum > rowSizeWarnThreshold) { - ld.logBatchWarning(firstRegionName, sum, rowSizeWarnThreshold); + LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold + + ") (HBASE-18023)." 
+ " Requested Number of Rows: " + sum + " Client: " + + RpcServer.getRequestUserName().orElse(null) + "/" + + RpcServer.getRemoteAddress().orElse(null) + " first region in multi=" + firstRegionName); if (rejectRowsWithSizeOverThreshold) { throw new ServiceException( - "Rejecting large batch operation for current batch with firstRegionName: " - + firstRegionName + " , Requested Number of Rows: " + sum + " , Size Threshold: " - + rowSizeWarnThreshold); + "Rejecting large batch operation for current batch with firstRegionName: " + + firstRegionName + " , Requested Number of Rows: " + sum + " , Size Threshold: " + + rowSizeWarnThreshold); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java index 614b04b8eb45..26de198a8d42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java @@ -17,6 +17,14 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; + import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -26,16 +34,20 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; -import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.log4j.Appender; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.spi.LoggingEvent; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; @@ -52,21 +64,23 @@ * via "Multi" commands) so classified as MediumTests */ @RunWith(Parameterized.class) -@Category(LargeTests.class) +@Category(MediumTests.class) public class TestMultiLogThreshold { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiLogThreshold.class); - - private static RSRpcServices SERVICES; + HBaseClassTestRule.forClass(TestMultiLogThreshold.class); - private static HBaseTestingUtility TEST_UTIL; - private static Configuration CONF; + private static final TableName NAME = TableName.valueOf("tableName"); private static final byte[] TEST_FAM = Bytes.toBytes("fam"); - private static RSRpcServices.LogDelegate LD; - private static HRegionServer RS; - private static int THRESHOLD; + + private HBaseTestingUtility util; + private Configuration conf; + private int threshold; + private HRegionServer rs; + private RSRpcServices services; + + private Appender appender; @Parameterized.Parameter public static boolean rejectLargeBatchOp; @@ -78,20 +92,22 @@ public static List params() { @Before public void setupTest() throws Exception { - final TableName tableName = 
TableName.valueOf("tableName"); - TEST_UTIL = new HBaseTestingUtility(); - CONF = TEST_UTIL.getConfiguration(); - THRESHOLD = CONF.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, - HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); - CONF.setBoolean("hbase.rpc.rows.size.threshold.reject", rejectLargeBatchOp); - TEST_UTIL.startMiniCluster(); - TEST_UTIL.createTable(tableName, TEST_FAM); - RS = TEST_UTIL.getRSForFirstRegionInTable(tableName); + util = new HBaseTestingUtility(); + conf = util.getConfiguration(); + threshold = + conf.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); + conf.setBoolean("hbase.rpc.rows.size.threshold.reject", rejectLargeBatchOp); + util.startMiniCluster(); + util.createTable(NAME, TEST_FAM); + rs = util.getRSForFirstRegionInTable(NAME); + appender = mock(Appender.class); + LogManager.getLogger(RSRpcServices.class).addAppender(appender); } @After public void tearDown() throws Exception { - TEST_UTIL.shutdownMiniCluster(); + LogManager.getLogger(RSRpcServices.class).removeAppender(appender); + util.shutdownMiniCluster(); } private enum ActionType { @@ -104,18 +120,18 @@ private enum ActionType { * Actions */ private void sendMultiRequest(int rows, ActionType actionType) - throws ServiceException, IOException { + throws ServiceException, IOException { RpcController rpcc = Mockito.mock(HBaseRpcController.class); MultiRequest.Builder builder = MultiRequest.newBuilder(); int numRAs = 1; int numAs = 1; switch (actionType) { - case REGION_ACTIONS: - numRAs = rows; - break; - case ACTIONS: - numAs = rows; - break; + case REGION_ACTIONS: + numRAs = rows; + break; + case ACTIONS: + numAs = rows; + break; } for (int i = 0; i < numRAs; i++) { RegionAction.Builder rab = RegionAction.newBuilder(); @@ -128,38 +144,47 @@ private void sendMultiRequest(int rows, ActionType actionType) } builder.addRegionAction(rab.build()); } - LD = Mockito.mock(RSRpcServices.LogDelegate.class); - SERVICES = new RSRpcServices(RS, LD); - SERVICES.multi(rpcc, builder.build()); + services = new RSRpcServices(rs); + services.multi(rpcc, builder.build()); + } + + private void assertLogBatchWarnings(boolean expected) { + ArgumentCaptor captor = ArgumentCaptor.forClass(LoggingEvent.class); + verify(appender, atLeastOnce()).doAppend(captor.capture()); + boolean actual = false; + for (LoggingEvent event : captor.getAllValues()) { + if (event.getLevel() == Level.WARN && + event.getRenderedMessage().contains("Large batch operation detected")) { + actual = true; + break; + } + } + reset(appender); + assertEquals(expected, actual); } @Test public void testMultiLogThresholdRegionActions() throws ServiceException, IOException { try { - sendMultiRequest(THRESHOLD + 1, ActionType.REGION_ACTIONS); - Assert.assertFalse(rejectLargeBatchOp); + sendMultiRequest(threshold + 1, ActionType.REGION_ACTIONS); + assertFalse(rejectLargeBatchOp); } catch (ServiceException e) { - Assert.assertTrue(rejectLargeBatchOp); + assertTrue(rejectLargeBatchOp); } - Mockito.verify(LD, Mockito.times(1)) - .logBatchWarning(Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()); + assertLogBatchWarnings(true); - sendMultiRequest(THRESHOLD, ActionType.REGION_ACTIONS); - Mockito.verify(LD, Mockito.never()) - .logBatchWarning(Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()); + sendMultiRequest(threshold, ActionType.REGION_ACTIONS); + assertLogBatchWarnings(false); try { - sendMultiRequest(THRESHOLD + 1, ActionType.ACTIONS); - Assert.assertFalse(rejectLargeBatchOp); + sendMultiRequest(threshold + 1, 
ActionType.ACTIONS); + assertFalse(rejectLargeBatchOp); } catch (ServiceException e) { - Assert.assertTrue(rejectLargeBatchOp); + assertTrue(rejectLargeBatchOp); } - Mockito.verify(LD, Mockito.times(1)) - .logBatchWarning(Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()); + assertLogBatchWarnings(true); - sendMultiRequest(THRESHOLD, ActionType.ACTIONS); - Mockito.verify(LD, Mockito.never()) - .logBatchWarning(Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()); + sendMultiRequest(threshold, ActionType.ACTIONS); + assertLogBatchWarnings(false); } - } From d6852d30d592f4b2c6f117d210cb5a8139eb2b3f Mon Sep 17 00:00:00 2001 From: gvprathyusha6 Date: Mon, 9 Nov 2020 12:23:36 +0530 Subject: [PATCH 489/769] HBASE-24667 Rename configs that support atypical DNS set ups to put them in hbase.unsafe Closes #2542 Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/util/DNS.java | 14 ++++++- .../src/main/resources/hbase-default.xml | 6 +-- .../hbase/rest/TestSecureRESTServer.java | 2 +- .../hbase/regionserver/HRegionServer.java | 35 ++++++++++++---- .../TestRegionServerHostname.java | 40 +++++++++++++++---- 5 files changed, 75 insertions(+), 22 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java index 2b4e1cbf02cd..5c23ddcedb5a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java @@ -35,13 +35,22 @@ public final class DNS { // the specification of server hostname is optional. The hostname should be resolvable from // both master and region server @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) - public static final String RS_HOSTNAME_KEY = "hbase.regionserver.hostname"; + public static final String UNSAFE_RS_HOSTNAME_KEY = "hbase.unsafe.regionserver.hostname"; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public static final String MASTER_HOSTNAME_KEY = "hbase.master.hostname"; private static boolean HAS_NEW_DNS_GET_DEFAULT_HOST_API; private static Method GET_DEFAULT_HOST_METHOD; + /** + * @deprecated since 2.4.0 and will be removed in 4.0.0. + * Use {@link DNS#UNSAFE_RS_HOSTNAME_KEY} instead. + * @see HBASE-24667 + */ + @Deprecated + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) + public static final String RS_HOSTNAME_KEY = "hbase.regionserver.hostname"; + static { try { GET_DEFAULT_HOST_METHOD = org.apache.hadoop.net.DNS.class @@ -50,6 +59,7 @@ public final class DNS { } catch (Exception e) { HAS_NEW_DNS_GET_DEFAULT_HOST_API = false; // FindBugs: Causes REC_CATCH_EXCEPTION. Suppressed } + Configuration.addDeprecation(RS_HOSTNAME_KEY, UNSAFE_RS_HOSTNAME_KEY); } public enum ServerType { @@ -106,7 +116,7 @@ public static String getHostname(@NonNull Configuration conf, @NonNull ServerTyp hostname = conf.get(MASTER_HOSTNAME_KEY); break; case REGIONSERVER: - hostname = conf.get(RS_HOSTNAME_KEY); + hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY); break; default: hostname = null; diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 6fb6ce98e33d..def502a62cfc 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1119,19 +1119,19 @@ possible configurations would overwhelm and obscure the important. 
http://docs.oracle.com/javase/1.5.0/docs/api/java/net/Socket.html#getTcpNoDelay() - hbase.regionserver.hostname + hbase.unsafe.regionserver.hostname This config is for experts: don't set its value unless you really know what you are doing. When set to a non-empty value, this represents the (external facing) hostname for the underlying server. See https://issues.apache.org/jira/browse/HBASE-12954 for details. - hbase.regionserver.hostname.disable.master.reversedns + hbase.unsafe.regionserver.hostname.disable.master.reversedns false This config is for experts: don't set its value unless you really know what you are doing. When set to true, regionserver will use the current node hostname for the servername and HMaster will skip reverse DNS lookup and use the hostname sent by regionserver instead. Note that this config and - hbase.regionserver.hostname are mutually exclusive. See https://issues.apache.org/jira/browse/HBASE-18226 + hbase.unsafe.regionserver.hostname are mutually exclusive. See https://issues.apache.org/jira/browse/HBASE-18226 for more details. - 2.1.11 - 1.0.18 + 2.1.31 + 1.0.55 2.12.2 1.60 1.0.1 From dc947bd0fce55bf5a012e3ce2e9da46ed4cfd76f Mon Sep 17 00:00:00 2001 From: ramkrish86 Date: Wed, 11 Nov 2020 17:39:39 +0530 Subject: [PATCH 493/769] HBASE-25187 Improve SizeCachedKV variants initialization (#2582) * HBASE-25187 Improve SizeCachedKV variants initialization * HBASE-25187 Improve SizeCachedKV variants initialization * The BBKeyValue also can be optimized * Change for SizeCachedKeyValue * Addressing revew comments * Fixing checkstyle and spot bugs comments * Spot bug fix for hashCode * Minor updates make the rowLen as short and some consturctor formatting * Change two more places where there was a cast --- .../hbase/ByteBufferKeyOnlyKeyValue.java | 14 ++- .../hbase/SizeCachedByteBufferKeyValue.java | 90 +++++++++++++++++++ .../hadoop/hbase/SizeCachedKeyValue.java | 16 +++- .../SizeCachedNoTagsByteBufferKeyValue.java | 80 +++++++++++++++++ .../hbase/SizeCachedNoTagsKeyValue.java | 9 +- .../hbase/io/encoding/RowIndexSeekerV1.java | 22 +++-- .../hbase/io/hfile/HFileReaderImpl.java | 28 +++--- 7 files changed, 234 insertions(+), 25 deletions(-) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java index 31f71f98c500..cc7e8d72c3d7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java @@ -61,10 +61,22 @@ public ByteBufferKeyOnlyKeyValue(ByteBuffer buf, int offset, int length) { * @param length */ public void setKey(ByteBuffer key, int offset, int length) { + setKey(key, offset, length, ByteBufferUtils.toShort(key, offset)); + } + + /** + * A setter that helps to avoid object creation every time and whenever + * there is a need to create new OffheapKeyOnlyKeyValue. 
+ * @param key - the key part of the cell + * @param offset - offset of the cell + * @param length - length of the cell + * @param rowLen - the rowlen part of the cell + */ + public void setKey(ByteBuffer key, int offset, int length, short rowLen) { this.buf = key; this.offset = offset; this.length = length; - this.rowLen = ByteBufferUtils.toShort(this.buf, this.offset); + this.rowLen = rowLen; } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java new file mode 100644 index 000000000000..9f5d9c179dd5 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedByteBufferKeyValue.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This Cell is an implementation of {@link ByteBufferExtendedCell} where the data resides in + * off heap/ on heap ByteBuffer + */ +@InterfaceAudience.Private +public class SizeCachedByteBufferKeyValue extends ByteBufferKeyValue { + + public static final int FIXED_OVERHEAD = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT; + private short rowLen; + private int keyLen; + + public SizeCachedByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen, short rowLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; + setSequenceId(seqId); + } + + @Override + public short getRowLength() { + return rowLen; + } + + @Override + public int getKeyLength() { + return this.keyLen; + } + + @Override + public long heapSize() { + return super.heapSize() + FIXED_OVERHEAD; + } + + /** + * Override by just returning the length for saving cost of method dispatching. If not, it will + * call {@link ExtendedCell#getSerializedSize()} firstly, then forward to + * {@link SizeCachedKeyValue#getSerializedSize(boolean)}. 
(See HBASE-21657) + */ + @Override + public int getSerializedSize() { + return this.length; + } + + @Override + public boolean equals(Object other) { + return super.equals(other); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java index 663f3eb77c66..5141cfba08f7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java @@ -39,12 +39,22 @@ public class SizeCachedKeyValue extends KeyValue { private short rowLen; private int keyLen; - public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId) { + public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen) { super(bytes, offset, length); // We will read all these cached values at least once. Initialize now itself so that we can // avoid uninitialized checks with every time call - rowLen = super.getRowLength(); - keyLen = super.getKeyLength(); + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen, + short rowLen) { + super(bytes, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; setSequenceId(seqId); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java new file mode 100644 index 000000000000..0374169d9b79 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsByteBufferKeyValue.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This Cell is an implementation of {@link ByteBufferExtendedCell} where the data resides in + * off heap/ on heap ByteBuffer + */ +@InterfaceAudience.Private +public class SizeCachedNoTagsByteBufferKeyValue extends NoTagsByteBufferKeyValue { + + public static final int FIXED_OVERHEAD = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT; + private short rowLen; + private int keyLen; + + public SizeCachedNoTagsByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen) { + super(buf, offset, length); + // We will read all these cached values at least once. 
Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = super.getRowLength(); + this.keyLen = keyLen; + setSequenceId(seqId); + } + + public SizeCachedNoTagsByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId, + int keyLen, short rowLen) { + super(buf, offset, length); + // We will read all these cached values at least once. Initialize now itself so that we can + // avoid uninitialized checks with every time call + this.rowLen = rowLen; + this.keyLen = keyLen; + setSequenceId(seqId); + } + + @Override + public short getRowLength() { + return rowLen; + } + + @Override + public int getKeyLength() { + return this.keyLen; + } + + @Override + public long heapSize() { + return super.heapSize() + FIXED_OVERHEAD; + } + + @Override + public boolean equals(Object other) { + return super.equals(other); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java index 88b6177fcb18..85bdb52bbfd4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.java @@ -32,8 +32,13 @@ @InterfaceAudience.Private public class SizeCachedNoTagsKeyValue extends SizeCachedKeyValue { - public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId) { - super(bytes, offset, length, seqId); + public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen) { + super(bytes, offset, length, seqId, keyLen); + } + + public SizeCachedNoTagsKeyValue(byte[] bytes, int offset, int length, long seqId, int keyLen, + short rowLen) { + super(bytes, offset, length, seqId, keyLen, rowLen); } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java index 7ff7555ceb27..efc37e64522c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java @@ -18,15 +18,15 @@ import java.nio.ByteBuffer; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; -import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.NoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.SizeCachedByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedKeyValue; +import org.apache.hadoop.hbase.SizeCachedNoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; import org.apache.hadoop.hbase.io.encoding.AbstractDataBlockEncoder.AbstractEncodedSeeker; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -359,26 +359,30 @@ public Cell toCell() { // TODO : reduce the varieties of KV here. Check if based on a boolean // we can handle the 'no tags' case. if (tagsLength > 0) { + // TODO : getRow len here. 
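      // Illustrative sketch, not part of the patch itself: the reason keyLength is
      // threaded through to the SizeCached* cells is that the seeker has already
      // decoded it, while a plain KeyValue re-reads it from the backing array on
      // every call, roughly:
      //
      //   public int getKeyLength() {
      //     return Bytes.toInt(this.bytes, this.offset); // decoded on each invocation
      //   }
      //
      // whereas the SizeCached* variants simply return the field cached at
      // construction time:
      //
      //   public int getKeyLength() {
      //     return this.keyLen;
      //   }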
ret = new SizeCachedKeyValue(currentBuffer.array(), - currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId); + currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId, keyLength); } else { ret = new SizeCachedNoTagsKeyValue(currentBuffer.array(), - currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId); + currentBuffer.arrayOffset() + startOffset, cellBufSize, seqId, keyLength); } } else { currentBuffer.asSubByteBuffer(startOffset, cellBufSize, tmpPair); ByteBuffer buf = tmpPair.getFirst(); if (buf.isDirect()) { - ret = - tagsLength > 0 ? new ByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId) - : new NoTagsByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId); + // TODO : getRow len here. + ret = tagsLength > 0 + ? new SizeCachedByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId, + keyLength) + : new SizeCachedNoTagsByteBufferKeyValue(buf, tmpPair.getSecond(), cellBufSize, seqId, + keyLength); } else { if (tagsLength > 0) { ret = new SizeCachedKeyValue(buf.array(), buf.arrayOffset() - + tmpPair.getSecond(), cellBufSize, seqId); + + tmpPair.getSecond(), cellBufSize, seqId, keyLength); } else { ret = new SizeCachedNoTagsKeyValue(buf.array(), buf.arrayOffset() - + tmpPair.getSecond(), cellBufSize, seqId); + + tmpPair.getSecond(), cellBufSize, seqId, keyLength); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index de0b15feebb8..2060b20de415 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; -import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.NoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.SizeCachedByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedKeyValue; +import org.apache.hadoop.hbase.SizeCachedNoTagsByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; @@ -322,6 +322,7 @@ protected static class HFileScannerImpl implements HFileScanner { private long currMemstoreTS; protected final HFile.Reader reader; private int currTagsLen; + private short rowLen; // buffer backed keyonlyKV private ByteBufferKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferKeyOnlyKeyValue(); // A pair for reusing in blockSeek() so that we don't garbage lot of objects @@ -446,6 +447,7 @@ protected void readKeyValueLen() { this.currKeyLen = (int)(ll >> Integer.SIZE); this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); checkKeyValueLen(); + this.rowLen = blockBuffer.getShortAfterPosition(Bytes.SIZEOF_LONG); // Move position past the key and value lengths and then beyond the key and value int p = (Bytes.SIZEOF_LONG + currKeyLen + currValueLen); if (reader.getFileContext().isIncludesTags()) { @@ -554,8 +556,9 @@ protected int blockSeek(Cell key, boolean seekBefore) { + " path=" + 
reader.getPath()); } offsetFromPos += Bytes.SIZEOF_LONG; + this.rowLen = blockBuffer.getShortAfterPosition(offsetFromPos); blockBuffer.asSubByteBuffer(blockBuffer.position() + offsetFromPos, klen, pair); - bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen); + bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen, rowLen); int comp = PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv); offsetFromPos += klen + vlen; @@ -790,23 +793,28 @@ public Cell getCell() { // we can handle the 'no tags' case. if (currTagsLen > 0) { ret = new SizeCachedKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } else { ret = new SizeCachedNoTagsKeyValue(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId); + blockBuffer.arrayOffset() + blockBuffer.position(), cellBufSize, seqId, currKeyLen, + rowLen); } } else { ByteBuffer buf = blockBuffer.asSubByteBuffer(cellBufSize); if (buf.isDirect()) { - ret = currTagsLen > 0 ? new ByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId) - : new NoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId); + ret = currTagsLen > 0 + ? new SizeCachedByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, + currKeyLen, rowLen) + : new SizeCachedNoTagsByteBufferKeyValue(buf, buf.position(), cellBufSize, seqId, + currKeyLen, rowLen); } else { if (currTagsLen > 0) { ret = new SizeCachedKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId); + cellBufSize, seqId, currKeyLen, rowLen); } else { ret = new SizeCachedNoTagsKeyValue(buf.array(), buf.arrayOffset() + buf.position(), - cellBufSize, seqId); + cellBufSize, seqId, currKeyLen, rowLen); } } } @@ -1060,7 +1068,7 @@ public String getValueString() { public int compareKey(CellComparator comparator, Cell key) { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, pair); - this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen); + this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen, rowLen); return PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, this.bufBackedKeyOnlyKv); } From 1ce1d9d185fefbbe11531cbadbc6be14eb40bf9a Mon Sep 17 00:00:00 2001 From: niuyulin Date: Thu, 12 Nov 2020 17:40:24 +0800 Subject: [PATCH 494/769] HBASE-25276 Need to throw the original exception in HRegion#openHRegion (#2648) Signed-off-by: Guanghao Zhang --- .../org/apache/hadoop/hbase/regionserver/HRegion.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index d0e628432b13..67764b9f635b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -8137,11 +8137,16 @@ protected HRegion openHRegion(final CancelableProgressable reporter) RegionReplicaUtil.isDefaultReplica(getRegionInfo())) { writeRegionOpenMarker(wal, openSeqNum); } - } catch(Throwable t) { + } catch (Throwable t) { // By coprocessor path wrong region will open failed, // MetricsRegionWrapperImpl is already init and not close, // add region close when open failed - this.close(); + try { + this.close(); + } catch (Throwable e) { 
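        // The nested try/catch keeps a failure from close() from masking the
        // original cause 't' of the failed open, which is rethrown below. A
        // hypothetical alternative (a sketch only, not what this patch does) would
        // be to attach the secondary failure to the primary one instead of just
        // logging it:
        //
        //   try {
        //     this.close();
        //   } catch (Throwable e) {
        //     t.addSuppressed(e); // java.lang.Throwable#addSuppressed, JDK 7+
        //   }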
+ LOG.warn("Open region: {} failed. Try close region but got exception ", this.getRegionInfo(), + e); + } throw t; } return this; From 5eaa195c0ba6054f1b6a21c4e52b108bc4ea328b Mon Sep 17 00:00:00 2001 From: Mate Szalay-Beko Date: Thu, 12 Nov 2020 12:37:43 +0100 Subject: [PATCH 495/769] HBASE-25267 Add SSL keystore type and truststore related configs for HBase RESTServer (#2642) HBASE-25267 Make SSL keystore type configurable in HBase RESTServer In this patch I want to introduce the hbase.rest.ssl.keystore.type parameter, enabling us to customize the keystore type for the REST server. If the parameter is not provided, then we should fall-back to the current behaviour (which assumes keystore type JKS). This is similar to how we already configure the InfoServer objects with the ssl.server.keystore.type parameter to set up HTTPS for the various admin UIs. Signed-off-by: Wellington Chevreuil Signed-off-by: Balazs Meszaros Signed-off-by: Sean Busbey --- .../hbase/http/ssl/KeyStoreTestUtil.java | 79 ++++++- .../apache/hadoop/hbase/rest/Constants.java | 4 + .../apache/hadoop/hbase/rest/RESTServer.java | 19 ++ .../hadoop/hbase/rest/client/Client.java | 86 +++++++- .../hadoop/hbase/rest/TestRESTServerSSL.java | 195 ++++++++++++++++++ 5 files changed, 368 insertions(+), 15 deletions(-) create mode 100644 hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index c8abd9c6cebc..c201c7a52328 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -101,7 +101,12 @@ public static KeyPair generateKeyPair(String algorithm) private static KeyStore createEmptyKeyStore() throws GeneralSecurityException, IOException { - KeyStore ks = KeyStore.getInstance("JKS"); + return createEmptyKeyStore("jks"); + } + + private static KeyStore createEmptyKeyStore(String keyStoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = KeyStore.getInstance(keyStoreType); ks.load(null, null); // initialize return ks; } @@ -117,18 +122,29 @@ private static void saveKeyStore(KeyStore ks, String filename, } } + /** + * Creates a keystore with a single key and saves it to a file. + * This method will use the same password for the keystore and for the key. + * This method will always generate a keystore file in JKS format. + * + * @param filename String file to save + * @param password String store password to set on keystore + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ public static void createKeyStore(String filename, String password, String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); - ks.setKeyEntry(alias, privateKey, password.toCharArray(), - new Certificate[]{cert}); - saveKeyStore(ks, filename, password); + createKeyStore(filename, password, password, alias, privateKey, cert); } /** * Creates a keystore with a single key and saves it to a file. + * This method will always generate a keystore file in JKS format. 
* * @param filename String file to save * @param password String store password to set on keystore @@ -143,17 +159,66 @@ public static void createKeyStore(String filename, String password, String keyPassword, String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); + createKeyStore(filename, password, keyPassword, alias, privateKey, cert, "JKS"); + } + + + /** + * Creates a keystore with a single key and saves it to a file. + * + * @param filename String file to save + * @param password String store password to set on keystore + * @param keyPassword String key password to set on key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key + * @param keystoreType String keystore file type (e.g. "JKS") + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ + public static void createKeyStore(String filename, String password, String keyPassword, + String alias, Key privateKey, Certificate cert, + String keystoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(keystoreType); ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), new Certificate[]{cert}); saveKeyStore(ks, filename, password); } + /** + * Creates a truststore with a single certificate and saves it to a file. + * This method uses the default JKS truststore type. + * + * @param filename String file to save + * @param password String store password to set on truststore + * @param alias String alias to use for the certificate + * @param cert Certificate to add + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ public static void createTrustStore(String filename, String password, String alias, Certificate cert) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); + createTrustStore(filename, password, alias, cert, "JKS"); + } + + /** + * Creates a truststore with a single certificate and saves it to a file. + * + * @param filename String file to save + * @param password String store password to set on truststore + * @param alias String alias to use for the certificate + * @param cert Certificate to add + * @param trustStoreType String keystore file type (e.g. 
"JKS") + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ + public static void createTrustStore(String filename, String password, String alias, + Certificate cert, String trustStoreType) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(trustStoreType); ks.setCertificateEntry(alias, cert); saveKeyStore(ks, filename, password); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index 4cf8a93ed5b0..704eac78db5a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -51,6 +51,10 @@ public interface Constants { String REST_SSL_ENABLED = "hbase.rest.ssl.enabled"; String REST_SSL_KEYSTORE_STORE = "hbase.rest.ssl.keystore.store"; String REST_SSL_KEYSTORE_PASSWORD = "hbase.rest.ssl.keystore.password"; + String REST_SSL_KEYSTORE_TYPE = "hbase.rest.ssl.keystore.type"; + String REST_SSL_TRUSTSTORE_STORE = "hbase.rest.ssl.truststore.store"; + String REST_SSL_TRUSTSTORE_PASSWORD = "hbase.rest.ssl.truststore.password"; + String REST_SSL_TRUSTSTORE_TYPE = "hbase.rest.ssl.truststore.type"; String REST_SSL_KEYSTORE_KEYPASSWORD = "hbase.rest.ssl.keystore.keypassword"; String REST_SSL_EXCLUDE_CIPHER_SUITES = "hbase.rest.ssl.exclude.cipher.suites"; String REST_SSL_INCLUDE_CIPHER_SUITES = "hbase.rest.ssl.include.cipher.suites"; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index c6f769ee6054..4e6adfb6d7c3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -27,6 +27,7 @@ import java.util.concurrent.ArrayBlockingQueue; import javax.servlet.DispatcherType; import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -305,14 +306,32 @@ public synchronized void run() throws Exception { SslContextFactory sslCtxFactory = new SslContextFactory(); String keystore = conf.get(REST_SSL_KEYSTORE_STORE); + String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE); String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null); String keyPassword = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password); sslCtxFactory.setKeyStorePath(keystore); + if(StringUtils.isNotBlank(keystoreType)) { + sslCtxFactory.setKeyStoreType(keystoreType); + } sslCtxFactory.setKeyStorePassword(password); sslCtxFactory.setKeyManagerPassword(keyPassword); + String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE); + if(StringUtils.isNotBlank(trustStore)) { + sslCtxFactory.setTrustStorePath(trustStore); + } + String trustStorePassword = + HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); + if(StringUtils.isNotBlank(trustStorePassword)) { + sslCtxFactory.setTrustStorePassword(trustStorePassword); + } + String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE); + if(StringUtils.isNotBlank(trustStoreType)) { + sslCtxFactory.setTrustStoreType(trustStoreType); + } + String[] excludeCiphers = servlet.getConfiguration().getStrings( REST_SSL_EXCLUDE_CIPHER_SUITES, 
ArrayUtils.EMPTY_STRING_ARRAY); if (excludeCiphers.length != 0) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 9e6661bd2aac..47700aa9e4fe 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -21,15 +21,23 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; import java.util.Collections; import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; - +import javax.net.ssl.SSLContext; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; @@ -37,6 +45,7 @@ import org.apache.http.HttpResponse; import org.apache.http.HttpStatus; import org.apache.http.client.HttpClient; +import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpHead; @@ -44,9 +53,10 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.entity.InputStreamEntity; -import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; import org.apache.http.message.BasicHeader; -import org.apache.http.params.CoreConnectionPNames; +import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -81,14 +91,35 @@ public Client() { this(null); } - private void initialize(Cluster cluster, boolean sslEnabled) { + private void initialize(Cluster cluster, boolean sslEnabled, Optional trustStore) { this.cluster = cluster; this.sslEnabled = sslEnabled; extraHeaders = new ConcurrentHashMap<>(); String clspath = System.getProperty("java.class.path"); LOG.debug("classpath " + clspath); - this.httpClient = new DefaultHttpClient(); - this.httpClient.getParams().setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 2000); + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + + RequestConfig requestConfig = RequestConfig.custom(). + setConnectTimeout(2000).build(); + httpClientBuilder.setDefaultRequestConfig(requestConfig); + + // Since HBASE-25267 we don't use the deprecated DefaultHttpClient anymore. + // The new http client would decompress the gzip content automatically. + // In order to keep the original behaviour of this public class, we disable + // automatic content compression. 
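    // Caller-side sketch of how a custom trust store reaches this builder (this
    // mirrors the usage in TestRESTServerSSL later in this patch; the host, port,
    // path and password here are placeholder values):
    //
    //   Cluster cluster = new Cluster().add("localhost", 8080);
    //   Client sslClient = new Client(cluster, "/path/to/serverTS.pkcs12",
    //       Optional.of("myTSPassword"), Optional.of("pkcs12"));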
+ httpClientBuilder.disableContentCompression(); + + if(sslEnabled && trustStore.isPresent()) { + try { + SSLContext sslcontext = + SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); + httpClientBuilder.setSSLContext(sslcontext); + } catch (NoSuchAlgorithmException | KeyStoreException | KeyManagementException e) { + throw new ClientTrustStoreInitializationException("Error while processing truststore", e); + } + } + + this.httpClient = httpClientBuilder.build(); } /** @@ -96,7 +127,7 @@ private void initialize(Cluster cluster, boolean sslEnabled) { * @param cluster the cluster definition */ public Client(Cluster cluster) { - initialize(cluster, false); + this(cluster, false); } /** @@ -105,7 +136,38 @@ public Client(Cluster cluster) { * @param sslEnabled enable SSL or not */ public Client(Cluster cluster, boolean sslEnabled) { - initialize(cluster, sslEnabled); + initialize(cluster, sslEnabled, Optional.empty()); + } + + /** + * Constructor, allowing to define custom trust store (only for SSL connections) + * + * @param cluster the cluster definition + * @param trustStorePath custom trust store to use for SSL connections + * @param trustStorePassword password to use for custom trust store + * @param trustStoreType type of custom trust store + * + * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded + */ + public Client(Cluster cluster, String trustStorePath, + Optional trustStorePassword, Optional trustStoreType) { + + char[] password = trustStorePassword.map(String::toCharArray).orElse(null); + String type = trustStoreType.orElse(KeyStore.getDefaultType()); + + KeyStore trustStore; + try(FileInputStream inputStream = new FileInputStream(new File(trustStorePath))) { + trustStore = KeyStore.getInstance(type); + trustStore.load(inputStream, password); + } catch (KeyStoreException e) { + throw new ClientTrustStoreInitializationException( + "Invalid trust store type: " + type, e); + } catch (CertificateException | NoSuchAlgorithmException | IOException e) { + throw new ClientTrustStoreInitializationException( + "Trust store load error: " + trustStorePath, e); + } + + initialize(cluster, true, Optional.of(trustStore)); } /** @@ -724,4 +786,12 @@ public Response delete(Cluster cluster, String path, Header extraHdr) throws IOE method.releaseConnection(); } } + + + public static class ClientTrustStoreInitializationException extends RuntimeException { + + public ClientTrustStoreInitializationException(String message, Throwable cause) { + super(message, cause); + } + } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java new file mode 100644 index 000000000000..a1fe2f010fdb --- /dev/null +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.Optional; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RestTests.class, MediumTests.class}) +public class TestRESTServerSSL { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRESTServerSSL.class); + + private static final String KEY_STORE_PASSWORD = "myKSPassword"; + private static final String TRUST_STORE_PASSWORD = "myTSPassword"; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); + private static Client sslClient; + private static File keyDir; + private Configuration conf; + + @BeforeClass + public static void beforeClass() throws Exception { + keyDir = initKeystoreDir(); + KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); + X509Certificate serverCertificate = KeyStoreTestUtil.generateCertificate( + "CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); + + generateTrustStore("jks", serverCertificate); + generateTrustStore("jceks", serverCertificate); + generateTrustStore("pkcs12", serverCertificate); + + generateKeyStore("jks", keyPair, serverCertificate); + generateKeyStore("jceks", keyPair, serverCertificate); + generateKeyStore("pkcs12", keyPair, serverCertificate); + + TEST_UTIL.startMiniCluster(); + } + + @AfterClass + public static void afterClass() throws Exception { + // this will also delete the generated test keystore / teststore files, + // as we were placing them under the dataTestDir used by the minicluster + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void beforeEachTest() { + conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.set(Constants.REST_SSL_ENABLED, "true"); + conf.set(Constants.REST_SSL_KEYSTORE_KEYPASSWORD, KEY_STORE_PASSWORD); + conf.set(Constants.REST_SSL_KEYSTORE_PASSWORD, KEY_STORE_PASSWORD); + conf.set(Constants.REST_SSL_TRUSTSTORE_PASSWORD, TRUST_STORE_PASSWORD); + } + + @After + public void tearDownAfterTest() { + REST_TEST_UTIL.shutdownServletContainer(); + } + + @Test + public void testSslConnection() throws Exception { + startRESTServerWithDefaultKeystoreType(); + + Response response = 
sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test(expected = org.apache.http.client.ClientProtocolException.class) + public void testNonSslClientDenied() throws Exception { + startRESTServerWithDefaultKeystoreType(); + + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + Client nonSslClient = new Client(localCluster, false); + + nonSslClient.get("/version"); + } + + @Test + public void testSslConnectionUsingKeystoreFormatJKS() throws Exception { + startRESTServer("jks"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test + public void testSslConnectionUsingKeystoreFormatJCEKS() throws Exception { + startRESTServer("jceks"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + @Test + public void testSslConnectionUsingKeystoreFormatPKCS12() throws Exception { + startRESTServer("pkcs12"); + + Response response = sslClient.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + } + + + + private static File initKeystoreDir() { + String dataTestDir = TEST_UTIL.getDataTestDir().toString(); + File keystoreDir = new File(dataTestDir, TestRESTServerSSL.class.getSimpleName() + "_keys"); + keystoreDir.mkdirs(); + return keystoreDir; + } + + private static void generateKeyStore(String keyStoreType, KeyPair keyPair, + X509Certificate serverCertificate) throws Exception { + String keyStorePath = getKeystoreFilePath(keyStoreType); + KeyStoreTestUtil.createKeyStore(keyStorePath, KEY_STORE_PASSWORD, KEY_STORE_PASSWORD, + "serverKS", keyPair.getPrivate(), serverCertificate, keyStoreType); + } + + private static void generateTrustStore(String trustStoreType, X509Certificate serverCertificate) + throws Exception { + String trustStorePath = getTruststoreFilePath(trustStoreType); + KeyStoreTestUtil.createTrustStore(trustStorePath, TRUST_STORE_PASSWORD, "serverTS", + serverCertificate, trustStoreType); + } + + private static String getKeystoreFilePath(String keyStoreType) { + return String.format("%s/serverKS.%s", keyDir.getAbsolutePath(), keyStoreType); + } + + private static String getTruststoreFilePath(String trustStoreType) { + return String.format("%s/serverTS.%s", keyDir.getAbsolutePath(), trustStoreType); + } + + private void startRESTServerWithDefaultKeystoreType() throws Exception { + conf.set(Constants.REST_SSL_KEYSTORE_STORE, getKeystoreFilePath("jks")); + conf.set(Constants.REST_SSL_TRUSTSTORE_STORE, getTruststoreFilePath("jks")); + + REST_TEST_UTIL.startServletContainer(conf); + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + sslClient = new Client(localCluster, getTruststoreFilePath("jks"), + Optional.of(TRUST_STORE_PASSWORD), Optional.empty()); + } + + private void startRESTServer(String storeType) throws Exception { + conf.set(Constants.REST_SSL_KEYSTORE_TYPE, storeType); + conf.set(Constants.REST_SSL_KEYSTORE_STORE, getKeystoreFilePath(storeType)); + + conf.set(Constants.REST_SSL_TRUSTSTORE_STORE, getTruststoreFilePath(storeType)); + conf.set(Constants.REST_SSL_TRUSTSTORE_TYPE, storeType); + + REST_TEST_UTIL.startServletContainer(conf); + Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); + sslClient = new Client(localCluster, getTruststoreFilePath(storeType), + Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); + } + +} From 
e86b28d0abc3747877ded34b1a6d6f40389cf71d Mon Sep 17 00:00:00 2001 From: WenFeiYi Date: Thu, 12 Nov 2020 22:13:24 +0800 Subject: [PATCH 496/769] HBASE-25253 Deprecated master carrys regions related methods and configs (#2635) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/ZNodeClearer.java | 4 +++- .../hadoop/hbase/master/LoadBalancer.java | 21 +++++++++++++++++++ .../master/balancer/BaseLoadBalancer.java | 19 +++++++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java index 697706507bf0..1cde2fa24844 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java @@ -146,9 +146,11 @@ public static String parseMasterServerName(String rsZnodePath) { } /** - * * @return true if cluster is configured with master-rs collocation + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated private static boolean tablesOnMaster(Configuration conf) { boolean tablesOnMaster = true; String confValue = conf.get(BaseLoadBalancer.TABLES_ON_MASTER); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 90cb3946f8b2..d908aa5ef514 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -52,12 +52,20 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse * Master can carry regions as of hbase-2.0.0. * By default, it carries no tables. * TODO: Add any | system as flags to indicate what it can do. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated String TABLES_ON_MASTER = "hbase.balancer.tablesOnMaster"; /** * Master carries system tables. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated String SYSTEM_TABLES_ON_MASTER = "hbase.balancer.tablesOnMaster.systemTablesOnly"; // Used to signal to the caller that the region(s) cannot be assigned @@ -159,15 +167,28 @@ Map> retainAssignment(Map r /** * @return true if Master carries regions + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated static boolean isTablesOnMaster(Configuration conf) { return conf.getBoolean(TABLES_ON_MASTER, false); } + /** + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 + */ + @Deprecated static boolean isSystemTablesOnlyOnMaster(Configuration conf) { return conf.getBoolean(SYSTEM_TABLES_ON_MASTER, false); } + /** + * @deprecated since 2.4.0, will be removed in 3.0.0. 
+ * @see HBASE-15549 + */ + @Deprecated static boolean isMasterCanHostUserRegions(Configuration conf) { return isTablesOnMaster(conf) && !isSystemTablesOnlyOnMaster(conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index a47bff26a090..5ecedb35ff08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -1042,7 +1042,14 @@ public String toString() { protected ClusterMetrics clusterStatus = null; protected ServerName masterServerName; protected MasterServices services; + + /** + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 + */ + @Deprecated protected boolean onlySystemTablesOnMaster; + protected boolean maintenanceMode; @Override @@ -1075,7 +1082,11 @@ protected void setSlop(Configuration conf) { /** * Check if a region belongs to some system table. * If so, the primary replica may be expected to be put on the master regionserver. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated public boolean shouldBeOnMaster(RegionInfo region) { return (this.maintenanceMode || this.onlySystemTablesOnMaster) && region.getTable().isSystemTable(); @@ -1083,7 +1094,11 @@ public boolean shouldBeOnMaster(RegionInfo region) { /** * Balance the regions that should be on master regionserver. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. + * @see HBASE-15549 */ + @Deprecated protected List balanceMasterRegions(Map> clusterMap) { if (masterServerName == null || clusterMap == null || clusterMap.size() <= 1) return null; List plans = null; @@ -1132,7 +1147,11 @@ protected List balanceMasterRegions(Map /** * If master is configured to carry system tables only, in here is * where we figure what to assign it. + * + * @deprecated since 2.4.0, will be removed in 3.0.0. 
+ * @see HBASE-15549 */ + @Deprecated @NonNull protected Map> assignMasterSystemRegions( Collection regions, List servers) { From ae60b33a6ef93330d171b5a7d4a3a080679ae9fd Mon Sep 17 00:00:00 2001 From: Peter Somogyi Date: Thu, 12 Nov 2020 15:37:12 +0100 Subject: [PATCH 497/769] HBASE-25275 Upgrade asciidoctor (#2647) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- pom.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pom.xml b/pom.xml index cd76195611c2..9255f833d679 100755 --- a/pom.xml +++ b/pom.xml @@ -1430,10 +1430,10 @@ ${project.reporting.outputDirectory}/ book - images - coderay ${project.version} + images + coderay @@ -1641,8 +1641,8 @@ 1.0.0 4.2.0 - 1.5.8 - 1.5.0-rc.2 + 2.1.0 + 1.5.3 3.0.0 1.4 From 8d8ec552c956536c9baef072807238cdca9dd393 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 2 Dec 2020 22:23:03 +0800 Subject: [PATCH 555/769] HBASE-25342 Upgrade error prone to 2.4.0 (#2725) Have to disable MutablePublicArray because of a bug in error prone https://github.com/google/error-prone/issues/1645 Signed-off-by: stack --- hbase-build-configuration/pom.xml | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index 4bab5e9c579d..49a1dea8a199 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -108,7 +108,7 @@ -XDcompilePolicy=simple - -Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -Xep:FallThrough:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR + -Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -Xep:FallThrough:OFF -Xep:MutablePublicArray:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR -J-Xbootclasspath/p:${settings.localRepository}/com/google/errorprone/javac/${javac.version}/javac-${javac.version}.jar diff --git a/pom.xml b/pom.xml index 5b95ca4b441d..05fde4f5453d 100755 --- a/pom.xml +++ b/pom.xml @@ -1650,7 +1650,7 @@ --> 8.28 1.6.0 - 2.3.4 + 2.4.0 2.4.2 1.0.0 1.8 From 57809e516341f2922e0a2f8a87f904634a9baeee Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Wed, 2 Dec 2020 20:36:37 +0530 Subject: [PATCH 556/769] HBASE-25246 Backup/Restore hbase cell tags Closes #2706 Signed-off-by: Viraj Jasani --- .../hbase/shaded/protobuf/ProtobufUtil.java | 24 ++- .../shaded/protobuf/TestProtobufUtil.java | 44 +++++ .../apache/hadoop/hbase/mapreduce/Import.java | 5 +- .../hbase/mapreduce/TestImportExport.java | 158 ++++++++++++++++++ 4 files changed, 221 insertions(+), 10 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index b9a08676f8ee..cfbdd6486255 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2015,7 +2015,8 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(wrap(((ByteBufferExtendedCell) kv).getValueByteBuffer(), ((ByteBufferExtendedCell) kv).getValuePosition(), kv.getValueLength())); - // TODO : Once tags become first class then we may have to set tags to kvbuilder. 
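      // Context for the lines below: a cell's tag region is already self-describing
      // on the wire (each tag is a 2-byte length, a 1-byte type, then the value
      // bytes), so it can be copied into the protobuf field wholesale. The
      // array-backed branch further down in this method does the same thing without
      // the ByteBuffer wrapper:
      //
      //   kvbuilder.setTags(UnsafeByteOperations.unsafeWrap(kv.getTagsArray(),
      //       kv.getTagsOffset(), kv.getTagsLength()));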
+ kvbuilder.setTags(wrap(((ByteBufferExtendedCell) kv).getTagsByteBuffer(), + ((ByteBufferExtendedCell) kv).getTagsPosition(), kv.getTagsLength())); } else { kvbuilder.setRow( UnsafeByteOperations.unsafeWrap(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); @@ -2027,6 +2028,8 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(UnsafeByteOperations.unsafeWrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + kvbuilder.setTags(UnsafeByteOperations.unsafeWrap(kv.getTagsArray(), kv.getTagsOffset(), + kv.getTagsLength())); } return kvbuilder.build(); } @@ -2039,14 +2042,17 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - return cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()) - .build(); + ExtendedCellBuilder builder = cellBuilder.clear() + .setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()) + .setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()) + .setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()); + if (cell.hasTags()) { + builder.setTags(cell.getTags().toByteArray()); + } + return builder.build(); } public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 7d6eda817cfa..791beb7ede55 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -18,22 +18,30 @@ package org.apache.hadoop.hbase.shaded.protobuf; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -479,4 +487,40 @@ public void testRegionLockInfo() { + "\"sharedLockCount\":0" + "}]", lockJson); } + + /** + * Test {@link ProtobufUtil#toCell(Cell)} and + * {@link 
ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell)} conversion + * methods when it contains tags. + */ + @Test + public void testCellConversionWithTags() { + String tagStr = "tag-1"; + byte tagType = (byte)10; + Tag tag = new ArrayBackedTag(tagType, tagStr); + + ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + cellBuilder.setRow(Bytes.toBytes("row1")); + cellBuilder.setFamily(Bytes.toBytes("f1")); + cellBuilder.setQualifier(Bytes.toBytes("q1")); + cellBuilder.setValue(Bytes.toBytes("value1")); + cellBuilder.setType(Cell.Type.Delete); + cellBuilder.setTags(Collections.singletonList(tag)); + Cell cell = cellBuilder.build(); + + ClientProtos.Result protoResult = + ProtobufUtil.toResult(Result.create(Collections.singletonList(cell))); + assertNotNull(protoResult); + assertEquals(1, protoResult.getCellCount()); + + CellProtos.Cell protoCell = protoResult.getCell(0); + ExtendedCellBuilder decodedBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + Cell decodedCell = ProtobufUtil.toCell(decodedBuilder, protoCell); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(1, decodedTags.size()); + Tag decodedTag = decodedTags.get(0); + assertEquals(tagType, decodedTag.getType()); + assertEquals(tagStr, Tag.getValueAsString(decodedTag)); + } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 239a12bdc688..30071fdfd809 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -511,6 +512,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { + List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -524,7 +526,8 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength()); // value length + kv.getValueLength(), // value length + tags.size() == 0 ? 
null: tags); } } return kv; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 12060a742a2b..5a95fd8eecb6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -34,10 +34,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -46,10 +49,12 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -58,11 +63,18 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.Import.CellImporter; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -117,6 +129,9 @@ public class TestImportExport { private static final long now = System.currentTimeMillis(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final String TEST_ATTR = "source_op"; + public static final String TEST_TAG = "test_tag"; @BeforeClass public static void beforeClass() throws Throwable { @@ -801,4 +816,147 @@ public boolean isWALVisited() { return isVisited; } } + + /** + * Add cell tags to delete mutations, run export and import tool and + * verify that tags are present in import table also. + * @throws Throwable throws Throwable. 
+ */ + @Test + public void testTagsAddition() throws Throwable { + final TableName exportTable = TableName.valueOf(name.getMethodName()); + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(desc); + + Table exportT = UTIL.getConnection().getTable(exportTable); + + //Add first version of QUAL + Put p = new Put(ROW1); + p.addColumn(FAMILYA, QUAL, now, QUAL); + exportT.put(p); + + //Add Delete family marker + Delete d = new Delete(ROW1, now+3); + // Add test attribute to delete mutation. + d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); + exportT.delete(d); + + // Run export too with KeyValueCodecWithTags as Codec. This will ensure that export tool + // will use KeyValueCodecWithTags. + String[] args = new String[] { + "-D" + ExportUtils.RAW_SCAN + "=true", + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + exportTable.getNameAsString(), + FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); + // Assert tag exists in exportTable + assertTagExists(exportTable); + + // Create an import table with MetadataController. + final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); + TableDescriptor importTableDesc = TableDescriptorBuilder + .newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(importTableDesc); + + // Run import tool. + args = new String[] { + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + importTable.getNameAsString(), + FQ_OUTPUT_DIR + }; + assertTrue(runImport(args)); + // Make sure that tags exists in imported table. + assertTagExists(importTable); + } + + private void assertTagExists(TableName table) throws IOException { + List values = new ArrayList<>(); + for (HRegion region : UTIL.getHBaseCluster().getRegions(table)) { + Scan scan = new Scan(); + // Make sure to set rawScan to true so that we will get Delete Markers. + scan.setRaw(true); + scan.readAllVersions(); + scan.withStartRow(ROW1); + // Need to use RegionScanner instead of table#getScanner since the latter will + // not return tags since it will go through rpc layer and remove tags intentionally. + RegionScanner scanner = region.getScanner(scan); + scanner.next(values); + if (!values.isEmpty()) { + break; + } + } + boolean deleteFound = false; + for (Cell cell: values) { + if (PrivateCellUtil.isDelete(cell.getType().getCode())) { + deleteFound = true; + List tags = PrivateCellUtil.getTags(cell); + Assert.assertEquals(1, tags.size()); + for (Tag tag : tags) { + Assert.assertEquals(TEST_TAG, Tag.getValueAsString(tag)); + } + } + } + Assert.assertTrue(deleteFound); + } + + /* + This co-proc will add a cell tag to delete mutation. 
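     Tags are not normally carried over client RPC (note the KeyValueCodecWithTags
     codec the test configures for exactly that reason), so the test smuggles the
     tag value in as an operation attribute (TEST_ATTR) and this observer converts
     it into a real cell tag on the server side. The core of that conversion, as
     implemented below, is:

       Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr);
       Cell updatedCell = PrivateCellUtil.createCell(cell, tags);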
+ */ + public static class MetadataController implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) + throws IOException { + if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { + return; + } + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (!(m instanceof Delete)) { + continue; + } + byte[] sourceOpAttr = m.getAttribute(TEST_ATTR); + if (sourceOpAttr == null) { + continue; + } + Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); + List updatedCells = new ArrayList<>(); + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + Cell cell = cellScanner.current(); + List tags = PrivateCellUtil.getTags(cell); + tags.add(sourceOpTag); + Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + updatedCells.add(updatedCell); + } + m.getFamilyCellMap().clear(); + // Clear and add new Cells to the Mutation. + for (Cell cell : updatedCells) { + Delete d = (Delete) m; + d.add(cell); + } + } + } + } } From a504da33e25c25836af3ed7dfbd88e87efbd4dea Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 2 Dec 2020 09:55:24 -0800 Subject: [PATCH 557/769] HBASE-25349 [Flakey Tests] branch-2 TestRefreshRecoveredReplication.testReplicationRefreshSource:141 Waiting timed out after [60,000] msec (#2731) Start the check for recovered queue presence earlier. Signed-off-by: Nick Dimiduk --- .../ReplicationSourceManager.java | 2 +- .../TestRefreshRecoveredReplication.java | 29 ++++++++++++------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 95157ca98d9b..c1166802b0ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -999,7 +999,7 @@ public void run() { wals.add(wal); } oldsources.add(src); - LOG.trace("Added source for recovered queue: " + src.getQueueId()); + LOG.info("Added source for recovered queue {}", src.getQueueId()); for (String wal : walsSet) { LOG.trace("Enqueueing log from recovered queue for source: " + src.getQueueId()); src.enqueueLog(new Path(oldLogDir, wal)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java index f84f32abdf88..cf4f7106f060 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java @@ -18,7 +18,10 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; +import java.util.Collection; +import java.util.List; import java.util.Optional; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -32,6 +35,7 @@ import org.apache.hadoop.hbase.client.Table; import 
org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.replication.TestReplicationBase; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; @@ -51,6 +55,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; +import static org.junit.Assert.assertEquals; /** * Testcase for HBASE-24871. @@ -75,6 +80,7 @@ public class TestRefreshRecoveredReplication extends TestReplicationBase { @BeforeClass public static void setUpBeforeClass() throws Exception { + // NUM_SLAVES1 is presumed 2 in below. NUM_SLAVES1 = 2; // replicate slowly Configuration conf1 = UTIL1.getConfiguration(); @@ -121,22 +127,25 @@ public void testReplicationRefreshSource() throws Exception { table1.put(new Put(r).addColumn(famName, famName, r)); } - // kill rs holding table region - Optional server = UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads() - .stream() + // Kill rs holding table region. There are only TWO servers. We depend on it. + List rss = UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads(); + assertEquals(2, rss.size()); + Optional server = rss.stream() .filter(rst -> CollectionUtils.isNotEmpty(rst.getRegionServer().getRegions(tablename))) .findAny(); Assert.assertTrue(server.isPresent()); + HRegionServer otherServer = rss.get(0).getRegionServer() == server.get().getRegionServer()? + rss.get(1).getRegionServer(): rss.get(0).getRegionServer(); server.get().getRegionServer().abort("stopping for test"); + // waiting for recovered peer to appear. + Replication replication = (Replication)otherServer.getReplicationSourceService(); + UTIL1.waitFor(60000, () -> !replication.getReplicationManager().getOldSources().isEmpty()); + // Wait on only one server being up. 
UTIL1.waitFor(60000, () -> - UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() == NUM_SLAVES1 - 1); + // Have to go back to source here because getLiveRegionServerThreads makes new array each time + UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() == NUM_SLAVES1 - 1); UTIL1.waitTableAvailable(tablename); - - // waiting for recovered peer to start - Replication replication = (Replication) UTIL1.getMiniHBaseCluster() - .getLiveRegionServerThreads().get(0).getRegionServer().getReplicationSourceService(); - UTIL1.waitFor(60000, () -> - !replication.getReplicationManager().getOldSources().isEmpty()); + LOG.info("Available {}", tablename); // disable peer to trigger refreshSources hbaseAdmin.disableReplicationPeer(PEER_ID2); From 706f0ae4f538c39f708b65ff3155a70c8103c12b Mon Sep 17 00:00:00 2001 From: Laxman Goswami Date: Thu, 3 Dec 2020 14:31:49 +0530 Subject: [PATCH 558/769] HBASE-25230 Embedded zookeeper server not clean up the old data Closes #2732 Signed-off-by: maoling Signed-off-by: Viraj Jasani --- .../hadoop/hbase/zookeeper/HQuorumPeer.java | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java index 4f2a77ce6775..54c74991235a 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java @@ -44,6 +44,7 @@ import org.apache.zookeeper.server.admin.AdminServer; import org.apache.zookeeper.server.quorum.QuorumPeerConfig; import org.apache.zookeeper.server.quorum.QuorumPeerMain; +import org.apache.zookeeper.server.DatadirCleanupManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -88,6 +89,20 @@ public static void main(String[] args) { private static void runZKServer(QuorumPeerConfig zkConfig) throws IOException, AdminServer.AdminServerException { + + /** + * Start and schedule the purge task + * autopurge.purgeInterval is 0 by default,so in fact the DatadirCleanupManager task will not + * be started to clean the logs by default. Config is recommended only for standalone server. + */ + + DatadirCleanupManager purgeMgr=new DatadirCleanupManager( + zkConfig.getDataDir(), + zkConfig.getDataLogDir(), + zkConfig.getSnapRetainCount(), + zkConfig.getPurgeInterval()); + purgeMgr.start(); + if (zkConfig.isDistributed()) { QuorumPeerMain qp = new QuorumPeerMain(); qp.runFromConfig(zkConfig); From 3f3cf56e78574a112cb1f88c3086e2a58311b7b8 Mon Sep 17 00:00:00 2001 From: tianhang Date: Thu, 3 Dec 2020 21:29:33 +0800 Subject: [PATCH 559/769] HBASE-25355 [Documentation] Fix spelling error (#2735) Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/ops_mgt.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index d27c5d6e488f..e491cbc95b54 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -2697,7 +2697,7 @@ up, while `Source.TimeStampOfLastShippedOp`, `Source.EditsReadFromLogQueue`, `Source.OpsShippedToTarget` or `Source.TimeStampOfNextToReplicate` do not change at all, then replication flow is failing to progress, and there might be problems within clusters communication. 
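A quick interactive cross-check for the source metrics above is the shell's replication
status report. A minimal session is sketched below; the command itself is real, but the
layout of the report varies between versions, so treat the output as illustrative:

[source,ruby]
----
hbase> status 'replication', 'source'
----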
This could also happen if replication is manually paused -(via hbase shell `disable_peer` command, for example), but date keeps getting ingested +(via hbase shell `disable_peer` command, for example), but data keeps getting ingested in the source cluster tables. == Running Multiple Workloads On a Single Cluster From 7ab49d1273301757e6a43ebfbf113f6d0b245124 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 3 Dec 2020 21:16:24 +0530 Subject: [PATCH 560/769] Revert "HBASE-25246 Backup/Restore hbase cell tags" This reverts commit 56eb5c9fc8de796665fe959087cb24b9f5f1afd4. --- .../hbase/shaded/protobuf/ProtobufUtil.java | 24 +-- .../shaded/protobuf/TestProtobufUtil.java | 44 ----- .../apache/hadoop/hbase/mapreduce/Import.java | 5 +- .../hbase/mapreduce/TestImportExport.java | 158 ------------------ 4 files changed, 10 insertions(+), 221 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index cfbdd6486255..b9a08676f8ee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2015,8 +2015,7 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(wrap(((ByteBufferExtendedCell) kv).getValueByteBuffer(), ((ByteBufferExtendedCell) kv).getValuePosition(), kv.getValueLength())); - kvbuilder.setTags(wrap(((ByteBufferExtendedCell) kv).getTagsByteBuffer(), - ((ByteBufferExtendedCell) kv).getTagsPosition(), kv.getTagsLength())); + // TODO : Once tags become first class then we may have to set tags to kvbuilder. } else { kvbuilder.setRow( UnsafeByteOperations.unsafeWrap(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); @@ -2028,8 +2027,6 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(UnsafeByteOperations.unsafeWrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); - kvbuilder.setTags(UnsafeByteOperations.unsafeWrap(kv.getTagsArray(), kv.getTagsOffset(), - kv.getTagsLength())); } return kvbuilder.build(); } @@ -2042,17 +2039,14 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - ExtendedCellBuilder builder = cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()); - if (cell.hasTags()) { - builder.setTags(cell.getTags().toByteArray()); - } - return builder.build(); + return cellBuilder.clear() + .setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()) + .setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()) + .setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()) + .build(); } public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 791beb7ede55..7d6eda817cfa 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -18,30 +18,22 @@ package org.apache.hadoop.hbase.shaded.protobuf; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; -import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -487,40 +479,4 @@ public void testRegionLockInfo() { + "\"sharedLockCount\":0" + "}]", lockJson); } - - /** - * Test {@link ProtobufUtil#toCell(Cell)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell)} conversion - * methods when it contains tags. - */ - @Test - public void testCellConversionWithTags() { - String tagStr = "tag-1"; - byte tagType = (byte)10; - Tag tag = new ArrayBackedTag(tagType, tagStr); - - ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); - cellBuilder.setRow(Bytes.toBytes("row1")); - cellBuilder.setFamily(Bytes.toBytes("f1")); - cellBuilder.setQualifier(Bytes.toBytes("q1")); - cellBuilder.setValue(Bytes.toBytes("value1")); - cellBuilder.setType(Cell.Type.Delete); - cellBuilder.setTags(Collections.singletonList(tag)); - Cell cell = cellBuilder.build(); - - ClientProtos.Result protoResult = - ProtobufUtil.toResult(Result.create(Collections.singletonList(cell))); - assertNotNull(protoResult); - assertEquals(1, protoResult.getCellCount()); - - CellProtos.Cell protoCell = protoResult.getCell(0); - ExtendedCellBuilder decodedBuilder = - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); - Cell decodedCell = ProtobufUtil.toCell(decodedBuilder, protoCell); - List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(1, decodedTags.size()); - Tag decodedTag = decodedTags.get(0); - assertEquals(tagType, decodedTag.getType()); - assertEquals(tagStr, Tag.getValueAsString(decodedTag)); - } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 30071fdfd809..239a12bdc688 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Tag; import 
org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -512,7 +511,6 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { - List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -526,8 +524,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength(), // value length - tags.size() == 0 ? null: tags); + kv.getValueLength()); // value length } } return kv; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 5a95fd8eecb6..12060a742a2b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -34,13 +34,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -49,12 +46,10 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -63,18 +58,11 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.Import.CellImporter; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; -import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -129,9 +117,6 @@ public class TestImportExport { 
private static final long now = System.currentTimeMillis(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); - public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); - public static final String TEST_ATTR = "source_op"; - public static final String TEST_TAG = "test_tag"; @BeforeClass public static void beforeClass() throws Throwable { @@ -816,147 +801,4 @@ public boolean isWALVisited() { return isVisited; } } - - /** - * Add cell tags to delete mutations, run export and import tool and - * verify that tags are present in import table also. - * @throws Throwable throws Throwable. - */ - @Test - public void testTagsAddition() throws Throwable { - final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(exportTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); - UTIL.getAdmin().createTable(desc); - - Table exportT = UTIL.getConnection().getTable(exportTable); - - //Add first version of QUAL - Put p = new Put(ROW1); - p.addColumn(FAMILYA, QUAL, now, QUAL); - exportT.put(p); - - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); - // Add test attribute to delete mutation. - d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); - exportT.delete(d); - - // Run export too with KeyValueCodecWithTags as Codec. This will ensure that export tool - // will use KeyValueCodecWithTags. - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - // This will make sure that codec will encode and decode tags in rpc call. - "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export - }; - assertTrue(runExport(args)); - // Assert tag exists in exportTable - assertTagExists(exportTable); - - // Create an import table with MetadataController. - final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); - TableDescriptor importTableDesc = TableDescriptorBuilder - .newBuilder(importTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); - UTIL.getAdmin().createTable(importTableDesc); - - // Run import tool. - args = new String[] { - // This will make sure that codec will encode and decode tags in rpc call. - "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - importTable.getNameAsString(), - FQ_OUTPUT_DIR - }; - assertTrue(runImport(args)); - // Make sure that tags exists in imported table. - assertTagExists(importTable); - } - - private void assertTagExists(TableName table) throws IOException { - List values = new ArrayList<>(); - for (HRegion region : UTIL.getHBaseCluster().getRegions(table)) { - Scan scan = new Scan(); - // Make sure to set rawScan to true so that we will get Delete Markers. - scan.setRaw(true); - scan.readAllVersions(); - scan.withStartRow(ROW1); - // Need to use RegionScanner instead of table#getScanner since the latter will - // not return tags since it will go through rpc layer and remove tags intentionally. 
- RegionScanner scanner = region.getScanner(scan); - scanner.next(values); - if (!values.isEmpty()) { - break; - } - } - boolean deleteFound = false; - for (Cell cell: values) { - if (PrivateCellUtil.isDelete(cell.getType().getCode())) { - deleteFound = true; - List tags = PrivateCellUtil.getTags(cell); - Assert.assertEquals(1, tags.size()); - for (Tag tag : tags) { - Assert.assertEquals(TEST_TAG, Tag.getValueAsString(tag)); - } - } - } - Assert.assertTrue(deleteFound); - } - - /* - This co-proc will add a cell tag to delete mutation. - */ - public static class MetadataController implements RegionCoprocessor, RegionObserver { - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - @Override - public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) - throws IOException { - if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { - return; - } - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - if (!(m instanceof Delete)) { - continue; - } - byte[] sourceOpAttr = m.getAttribute(TEST_ATTR); - if (sourceOpAttr == null) { - continue; - } - Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); - List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { - Cell cell = cellScanner.current(); - List tags = PrivateCellUtil.getTags(cell); - tags.add(sourceOpTag); - Cell updatedCell = PrivateCellUtil.createCell(cell, tags); - updatedCells.add(updatedCell); - } - m.getFamilyCellMap().clear(); - // Clear and add new Cells to the Mutation. - for (Cell cell : updatedCells) { - Delete d = (Delete) m; - d.add(cell); - } - } - } - } } From 15af9e1f43fe578f217987ed1d56934378d5e9a0 Mon Sep 17 00:00:00 2001 From: SteNicholas Date: Fri, 4 Dec 2020 10:56:53 +0800 Subject: [PATCH 561/769] HBASE-24966 The methods in AsyncTableRegionLocator should not throw IOException directly (#2495) Signed-off-by: Duo Zhang --- .../hadoop/hbase/client/AsyncTableRegionLocator.java | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java index 321f44e87b51..96e3ec4173a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import java.io.IOException; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; @@ -89,7 +88,6 @@ default CompletableFuture getRegionLocation(byte[] row, int rep * Find all the replicas for the region on which the given row is being served. * @param row Row to find. * @return Locations for all the replicas of the row. - * @throws IOException if a remote or network exception occurs */ default CompletableFuture> getRegionLocations(byte[] row) { return getRegionLocations(row, false); @@ -100,7 +98,6 @@ default CompletableFuture> getRegionLocations(byte[] row) * @param row Row to find. * @param reload true to reload information or false to use cached information * @return Locations for all the replicas of the row. 
- * @throws IOException if a remote or network exception occurs */ CompletableFuture> getRegionLocations(byte[] row, boolean reload); @@ -120,9 +117,8 @@ default CompletableFuture> getRegionLocations(byte[] row) *
    * This is mainly useful for the MapReduce integration. * @return Array of region starting row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture> getStartKeys() throws IOException { + default CompletableFuture> getStartKeys() { return getStartEndKeys().thenApply( startEndKeys -> startEndKeys.stream().map(Pair::getFirst).collect(Collectors.toList())); } @@ -132,9 +128,8 @@ default CompletableFuture> getStartKeys() throws IOException { *
    * This is mainly useful for the MapReduce integration. * @return Array of region ending row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture> getEndKeys() throws IOException { + default CompletableFuture> getEndKeys() { return getStartEndKeys().thenApply( startEndKeys -> startEndKeys.stream().map(Pair::getSecond).collect(Collectors.toList())); } @@ -144,9 +139,8 @@ default CompletableFuture> getEndKeys() throws IOException { *
    * This is mainly useful for the MapReduce integration. * @return Pair of arrays of region starting and ending row keys - * @throws IOException if a remote or network exception occurs */ - default CompletableFuture>> getStartEndKeys() throws IOException { + default CompletableFuture>> getStartEndKeys() { return getAllRegionLocations().thenApply( locs -> locs.stream().filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion())) .map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())) From c98ebf49c3778a406d45466c5e03e47e07010fa6 Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 4 Dec 2020 08:49:56 -0800 Subject: [PATCH 562/769] HBASE-25354 Update create-release scripts; rotted README Add note on ssh-agent. dev-support/create-release/do-release.sh move gpg check to non-docker context. Also cleanup tmp files when done. dev-support/create-release/hbase-rm/Dockerfile dev-support/create-release/mac-sshd-gpg-agent/Dockerfile Hack to update packages... the old ones no longer available. dev-support/create-release/release-util.sh Allow that there are no JIRA changes in a release. Good for testing. --- dev-support/create-release/README.txt | 18 ++++++---- .../create-release/do-release-docker.sh | 2 +- dev-support/create-release/do-release.sh | 25 ++++++++----- .../create-release/hbase-rm/Dockerfile | 2 +- .../mac-sshd-gpg-agent/Dockerfile | 2 +- dev-support/create-release/release-util.sh | 36 ++++++++++++++----- 6 files changed, 57 insertions(+), 28 deletions(-) diff --git a/dev-support/create-release/README.txt b/dev-support/create-release/README.txt index 4a457ddc09ec..e696574507f9 100644 --- a/dev-support/create-release/README.txt +++ b/dev-support/create-release/README.txt @@ -9,19 +9,21 @@ To run a build w/o invoking docker (not recommended!), use _do_release.sh_. Both scripts will query interactively for needed parameters and passphrases. For explanation of the parameters, execute: + $ release-build.sh --help -Before starting the RC build, run a reconciliation of what is in -JIRA with what is in the commit log. Make sure they align and that -anomalies are explained up in JIRA. +Before starting the RC build, run a reconciliation of what is in JIRA with +what is in the commit log. Make sure they align and that anomalies are +explained up in JIRA. See http://hbase.apache.org/book.html#maven.release -Regardless of where your release build will run (locally, locally in docker, on a remote machine, -etc) you will need a local gpg-agent with access to your secret keys. A quick way to tell gpg -to clear out state and start a gpg-agent is via the following command phrase: +Regardless of where your release build will run (locally, locally in docker, +on a remote machine, etc) you will need a local gpg-agent with access to your +secret keys. A quick way to tell gpg to clear out state and start a gpg-agent +is via the following command phrase: -$ gpgconf --kill all && gpg-connect-agent /bye + $ gpgconf --kill all && gpg-connect-agent /bye Before starting an RC build, make sure your local gpg-agent has configs to properly handle your credentials, especially if you want to avoid @@ -33,6 +35,8 @@ on caching the unlocked secret via ~/.gnupg/gpg-agent.conf default-cache-ttl 86400 max-cache-ttl 86400 +Similarly, run ssh-agent with your ssh key added if building with docker. + Running a build on GCE is easy enough. Here are some notes if of use. Create an instance. 4CPU/15G/10G disk seems to work well enough. 
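If you script the instance creation with the gcloud CLI, something of this
shape has worked; the instance name, image flags, and zone defaults below are
placeholders to adapt, not project conventions:

  $ gcloud compute instances create hbase-rc-build \
      --machine-type=n1-standard-4 \
      --boot-disk-size=10GB \
      --image-family=ubuntu-1804-lts --image-project=ubuntu-os-cloud

(n1-standard-4 is the 4CPU/15G shape mentioned above.)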
Once up, run the below to make your machine fit for RC building: diff --git a/dev-support/create-release/do-release-docker.sh b/dev-support/create-release/do-release-docker.sh index e863cb373a0c..b48581f9165b 100755 --- a/dev-support/create-release/do-release-docker.sh +++ b/dev-support/create-release/do-release-docker.sh @@ -302,7 +302,7 @@ if [ "${HOST_OS}" == "DARWIN" ]; then > "${WORKDIR}/gpg-agent-proxy.known_hosts" if [ -s "${WORKDIR}/gpg-agent-proxy.known_hosts" ]; then echo "Your ssh known_hosts does not include the entries for the gpg-agent proxy container." - echo "The following entry(ies) arre missing:" + echo "The following entry(ies) are missing:" sed -e 's/^/ /' "${WORKDIR}/gpg-agent-proxy.known_hosts" read -r -p "Okay to add these entries to ${HOME}/.ssh/known_hosts? [y/n] " ANSWER if [ "$ANSWER" != "y" ]; then diff --git a/dev-support/create-release/do-release.sh b/dev-support/create-release/do-release.sh index 9500801c247b..5566b36c21e2 100755 --- a/dev-support/create-release/do-release.sh +++ b/dev-support/create-release/do-release.sh @@ -17,6 +17,10 @@ # limitations under the License. # +# Make a tmp dir into which we put files cleaned-up on exit. +TMPDIR=$(mktemp -d) +trap "rm -rf $TMPDIR" EXIT + set -e # Use the adjacent do-release-docker.sh instead, if you can. # Otherwise, this runs core of the release creation. @@ -84,19 +88,22 @@ if [ "$RUNNING_IN_DOCKER" = "1" ]; then else # Outside docker, need to ask for information about the release. get_release_info + + # Run this stuff when not in docker to check gpg. + gpg_test_file="${TMPDIR}/gpg_test.$$.txt" + echo "Testing gpg signing ${GPG} ${GPG_ARGS[@]} --detach --armor --sign ${gpg_test_file}" + echo "foo" > "${gpg_test_file}" + if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign "${gpg_test_file}" ; then + gpg_agent_help + fi + # In --batch mode we have to be explicit about what we are verifying + if ! "${GPG}" "${GPG_ARGS[@]}" --verify "${gpg_test_file}.asc" "${gpg_test_file}" ; then + gpg_agent_help + fi fi GPG_TTY="$(tty)" export GPG_TTY -echo "Testing gpg signing." -echo "foo" > gpg_test.txt -if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign gpg_test.txt ; then - gpg_agent_help -fi -# In --batch mode we have to be explicit about what we are verifying -if ! "${GPG}" "${GPG_ARGS[@]}" --verify gpg_test.txt.asc gpg_test.txt ; then - gpg_agent_help -fi if [[ -z "$RELEASE_STEP" ]]; then # If doing all stages, leave out 'publish-snapshot' diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile index 2c29974cfbba..26cb7e51abb3 100644 --- a/dev-support/create-release/hbase-rm/Dockerfile +++ b/dev-support/create-release/hbase-rm/Dockerfile @@ -34,7 +34,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ libxml2-dev='2.9.4+dfsg1-*' \ lsof='4.89+dfsg-*' \ maven='3.6.0-*' \ - openjdk-8-jdk='8u252-b09-*' \ + openjdk-8-jdk='8u*' \ python-pip='9.0.1-*' \ subversion='1.9.7-*' \ wget='1.19.4-*' \ diff --git a/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile b/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile index a71d867613b1..3d206dc83365 100644 --- a/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile +++ b/dev-support/create-release/mac-sshd-gpg-agent/Dockerfile @@ -83,7 +83,7 @@ FROM ubuntu:18.04 # into the container rather than launching a new docker container. 
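# (Per the commit message above -- "Hack to update packages... the old ones no
# longer available" -- exact version pins rot once the Ubuntu archive drops
# superseded builds; hence the gnupg2 bump in the RUN below, and the
# openjdk-8-jdk='8u*' loosening in the hbase-rm image earlier.)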
RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && DEBIAN_FRONTEND=noninteractive apt-get -qq -y install --no-install-recommends \ - openssh-server=1:7.6p1-4ubuntu0.3 gnupg2=2.2.4-1ubuntu1.2 && mkdir /run/sshd \ + openssh-server=1:7.6p1-4ubuntu0.3 gnupg2=2.2.4-1ubuntu1.3 && mkdir /run/sshd \ && echo "StreamLocalBindUnlink yes" >> /etc/ssh/sshd_config \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 64654bba4b86..4dffd672c2ae 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -501,10 +501,17 @@ function update_releasenotes { local jira_project local timing_token timing_token="$(start_step)" + changelog="CHANGELOG.${jira_fix_version}.md" + releasenotes="RELEASENOTES.${jira_fix_version}.md" + if [ -f ${changelog} ]; then + rm ${changelog} + fi + if [ -f ${releasenotes} ]; then + rm ${releasenotes} + fi jira_project="$(get_jira_name "$(basename "$project_dir")")" "${YETUS_HOME}/bin/releasedocmaker" -p "${jira_project}" --fileversions -v "${jira_fix_version}" \ - -l --sortorder=newer --skip-credits - pwd + -l --sortorder=newer --skip-credits || true # First clear out the changes written by previous RCs. if [ -f "${project_dir}/CHANGES.md" ]; then sed -i -e \ @@ -517,24 +524,35 @@ function update_releasenotes { "${project_dir}/RELEASENOTES.md" || true fi + # Yetus will not generate CHANGES if no JIRAs fixed against the release version + # (Could happen if a release were bungled such that we had to make a new one + # without changes) + if [ ! -f "${changelog}" ]; then + echo -e "## Release ${jira_fix_version} - Unreleased (as of `date`)\nNo changes\n" > "${changelog}" + fi + if [ ! -f "${releasenotes}" ]; then + echo -e "# hbase ${jira_fix_version} Release Notes\nNo changes\n" > "${releasenotes}" + fi + # The releasedocmaker call above generates RELEASENOTES.X.X.X.md and CHANGELOG.X.X.X.md. if [ -f "${project_dir}/CHANGES.md" ]; then # To insert into project's CHANGES.md...need to cut the top off the # CHANGELOG.X.X.X.md file removing license and first line and then # insert it after the license comment closing where we have a # DO NOT REMOVE marker text! - sed -i -e '/## Release/,$!d' "CHANGELOG.${jira_fix_version}.md" - sed -i -e "/DO NOT REMOVE/r CHANGELOG.${jira_fix_version}.md" "${project_dir}/CHANGES.md" + sed -i -e '/## Release/,$!d' "${changelog}" + sed -i -e '2,${/^# HBASE Changelog/d;}' "${project_dir}/CHANGES.md" + sed -i -e "/DO NOT REMOVE/r ${changelog}" "${project_dir}/CHANGES.md" else - mv "CHANGELOG.${jira_fix_version}.md" "${project_dir}/CHANGES.md" + mv "${changelog}" "${project_dir}/CHANGES.md" fi if [ -f "${project_dir}/RELEASENOTES.md" ]; then # Similar for RELEASENOTES but slightly different. 
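# (Aside on the sed idiom used just below: the address '/PATTERN/,$!d'
# deletes every line *outside* the range from the first PATTERN match to
# end-of-file -- i.e. it trims the license/header material above the first
# matching line and keeps the rest.)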
- sed -i -e '/Release Notes/,$!d' "RELEASENOTES.${jira_fix_version}.md" - sed -i -e "/DO NOT REMOVE/r RELEASENOTES.${jira_fix_version}.md" \ - "${project_dir}/RELEASENOTES.md" + sed -i -e '/Release Notes/,$!d' "${releasenotes}" + sed -i -e '2,${/^# RELEASENOTES/d;}' "${project_dir}/RELEASENOTES.md" + sed -i -e "/DO NOT REMOVE/r ${releasenotes}" "${project_dir}/RELEASENOTES.md" else - mv "RELEASENOTES.${jira_fix_version}.md" "${project_dir}/RELEASENOTES.md" + mv "${releasenotes}" "${project_dir}/RELEASENOTES.md" fi stop_step "${timing_token}" } From 59765fd5b039ea1d6e12d783bd56600e1e659fe4 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Sat, 5 Dec 2020 08:55:59 -0800 Subject: [PATCH 563/769] HBASE-25361 [Flakey Tests] branch-2 TestMetaRegionLocationCache.testStandByMetaLocations (#2736) Add a bit of a wait before testing if online replicas match the zk count. It might take a while for all replicas to come online. Signed-off-by: huaxiangsun --- .../hadoop/hbase/client/TestMetaRegionLocationCache.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index d42c1240f9ef..577e15cedfb9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -98,6 +98,14 @@ private void verifyCachedMetaLocations(HMaster master) throws Exception { assertFalse(metaHRLs.isEmpty()); ZKWatcher zk = master.getZooKeeper(); List metaZnodes = zk.getMetaReplicaNodes(); + // Wait till all replicas available. + while (master.getMetaRegionLocationCache().getMetaRegionLocations().get().size() != + metaZnodes.size()) { + Thread.sleep(1000); + if (++retries == 10) { + break; + } + } assertEquals(metaZnodes.size(), metaHRLs.size()); List actualHRLs = getCurrentMetaLocations(zk); Collections.sort(metaHRLs); From 5bd479b9ca3741431fe7a039478833f6fbe4f632 Mon Sep 17 00:00:00 2001 From: stack Date: Sat, 5 Dec 2020 14:00:18 -0800 Subject: [PATCH 564/769] HBASE-25361 [Flakey Tests] branch-2 TestMetaRegionLocationCache.testStandByMetaLocations (#2736) Addendum; Reset counter so we actually wait in the new loop added by the above. --- .../apache/hadoop/hbase/client/TestMetaRegionLocationCache.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index 577e15cedfb9..2bcddc9ea7f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -99,6 +99,7 @@ private void verifyCachedMetaLocations(HMaster master) throws Exception { ZKWatcher zk = master.getZooKeeper(); List metaZnodes = zk.getMetaReplicaNodes(); // Wait till all replicas available. 
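// (Why this reset matters, per the addendum message: 'retries' can arrive
// here already incremented by an earlier wait in this method, so without
// zeroing it the loop below might hit its exit condition immediately
// instead of actually waiting.)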
+ retries = 0; while (master.getMetaRegionLocationCache().getMetaRegionLocations().get().size() != metaZnodes.size()) { Thread.sleep(1000); @@ -119,6 +120,7 @@ private void verifyCachedMetaLocations(HMaster master) throws Exception { @Test public void testStandByMetaLocations() throws Exception { HMaster standBy = TEST_UTIL.getMiniHBaseCluster().startMaster().getMaster(); + standBy.isInitialized(); verifyCachedMetaLocations(standBy); } From 420df8454c8076f7ff1857fb015f0305a62ca944 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 7 Dec 2020 21:49:04 +0800 Subject: [PATCH 565/769] HBASE-25336 Use Address instead of InetSocketAddress in RpcClient implementation (#2716) Signed-off-by: Guanghao Zhang --- .../hadoop/hbase/ipc/AbstractRpcClient.java | 60 ++++--------------- .../hbase/ipc/BlockingRpcConnection.java | 25 ++------ .../hadoop/hbase/ipc/NettyRpcConnection.java | 27 ++------- .../hadoop/hbase/ipc/RpcConnection.java | 24 +++++++- 4 files changed, 39 insertions(+), 97 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 7a7b848304ff..e9ec6a92ee93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -22,9 +22,7 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.wrapException; import java.io.IOException; -import java.net.InetSocketAddress; import java.net.SocketAddress; -import java.net.UnknownHostException; import java.util.Collection; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -320,7 +318,7 @@ private int nextCallId() { * @return A pair with the Message response and the Cell data (if any). */ private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcController hrc, - Message param, Message returnType, final User ticket, final InetSocketAddress isa) + Message param, Message returnType, final User ticket, final Address isa) throws ServiceException { BlockingRpcCallback done = new BlockingRpcCallback<>(); callMethod(md, hrc, param, returnType, ticket, isa, done); @@ -392,7 +390,7 @@ private void onCallFinished(Call call, HBaseRpcController hrc, Address addr, Call callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcController hrc, final Message param, Message returnType, final User ticket, - final InetSocketAddress inetAddr, final RpcCallback callback) { + final Address addr, final RpcCallback callback) { final MetricsConnection.CallStats cs = MetricsConnection.newCallStats(); cs.setStartTime(EnvironmentEdgeManager.currentTime()); @@ -406,7 +404,6 @@ Call callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcController cs.setNumActionsPerServer(numActions); } - final Address addr = Address.fromSocketAddress(inetAddr); final AtomicInteger counter = concurrentCounterCache.getUnchecked(addr); Call call = new Call(nextCallId(), md, param, hrc.cellScanner(), returnType, hrc.getCallTimeout(), hrc.getPriority(), new RpcCallback() { @@ -520,13 +517,6 @@ private static class AbstractRpcChannel { protected final Address addr; - // We cache the resolved InetSocketAddress for the channel so we do not do a DNS lookup - // per method call on the channel. If the remote target is removed or reprovisioned and - // its identity changes a new channel with a newly resolved InetSocketAddress will be - // created as part of retry, so caching here is fine. 
- // Normally, caching an InetSocketAddress is an anti-pattern. - protected InetSocketAddress isa; - protected final AbstractRpcClient rpcClient; protected final User ticket; @@ -576,23 +566,9 @@ protected BlockingRpcChannelImplementation(AbstractRpcClient rpcClient, @Override public Message callBlockingMethod(Descriptors.MethodDescriptor md, RpcController controller, - Message param, Message returnType) throws ServiceException { - // Look up remote address upon first call - if (isa == null) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookups(); - } - isa = Address.toSocketAddress(addr); - if (isa.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookupsFailed(); - } - isa = null; - throw new ServiceException(new UnknownHostException(addr + " could not be resolved")); - } - } - return rpcClient.callBlockingMethod(md, configureRpcController(controller), - param, returnType, ticket, isa); + Message param, Message returnType) throws ServiceException { + return rpcClient.callBlockingMethod(md, configureRpcController(controller), param, returnType, + ticket, addr); } } @@ -608,29 +584,13 @@ protected RpcChannelImplementation(AbstractRpcClient rpcClient, Address addr, } @Override - public void callMethod(Descriptors.MethodDescriptor md, RpcController controller, - Message param, Message returnType, RpcCallback done) { - HBaseRpcController configuredController = - configureRpcController(Preconditions.checkNotNull(controller, - "RpcController can not be null for async rpc call")); - // Look up remote address upon first call - if (isa == null || isa.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookups(); - } - isa = Address.toSocketAddress(addr); - if (isa.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookupsFailed(); - } - isa = null; - controller.setFailed(addr + " could not be resolved"); - return; - } - } + public void callMethod(Descriptors.MethodDescriptor md, RpcController controller, Message param, + Message returnType, RpcCallback done) { + HBaseRpcController configuredController = configureRpcController( + Preconditions.checkNotNull(controller, "RpcController can not be null for async rpc call")); // This method does not throw any exceptions, so the caller must provide a // HBaseRpcController which is used to pass the exceptions. 
- this.rpcClient.callMethod(md, configuredController, param, returnType, ticket, isa, done); + this.rpcClient.callMethod(md, configuredController, param, returnType, ticket, addr, done); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index ce2bd11f960a..cd8035fd58ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -35,7 +35,6 @@ import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketTimeoutException; -import java.net.UnknownHostException; import java.security.PrivilegedExceptionAction; import java.util.ArrayDeque; import java.util.Locale; @@ -44,7 +43,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ThreadLocalRandom; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -52,7 +50,6 @@ import org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback; import org.apache.hadoop.hbase.log.HBaseMarkers; -import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.HBaseSaslRpcClient; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection; @@ -69,11 +66,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Message.Builder; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; @@ -256,16 +255,7 @@ protected void setupConnection() throws IOException { if (this.rpcClient.localAddr != null) { this.socket.bind(this.rpcClient.localAddr); } - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookups(); - } - InetSocketAddress remoteAddr = Address.toSocketAddress(remoteId.getAddress()); - if (remoteAddr.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookupsFailed(); - } - throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); - } + InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics); NetUtils.connect(this.socket, remoteAddr, this.rpcClient.connectTO); this.socket.setSoTimeout(this.rpcClient.readTO); return; @@ -374,15 +364,8 @@ private boolean setupSaslConnection(final InputStream in2, final OutputStream ou if (this.metrics != null) { this.metrics.incrNsLookups(); } - InetSocketAddress serverAddr = Address.toSocketAddress(remoteId.getAddress()); - if (serverAddr.isUnresolved()) { - if (this.metrics != null) { - this.metrics.incrNsLookupsFailed(); - } - throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); - } saslRpcClient = new HBaseSaslRpcClient(this.rpcClient.conf, provider, token, - serverAddr.getAddress(), securityInfo, 
this.rpcClient.fallbackAllowed, + socket.getInetAddress(), securityInfo, this.rpcClient.fallbackAllowed, this.rpcClient.conf.get("hbase.rpc.protection", QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 609d2c12ceae..d0a13ca33d6c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -32,17 +32,16 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.BufferCallEvent; import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback; -import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.NettyHBaseRpcConnectionHeaderHandler; import org.apache.hadoop.hbase.security.NettyHBaseSaslRpcClientHandler; import org.apache.hadoop.hbase.security.SaslChallengeDecoder; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; @@ -210,18 +209,9 @@ private void saslNegotiate(final Channel ch) { Promise saslPromise = ch.eventLoop().newPromise(); final NettyHBaseSaslRpcClientHandler saslHandler; try { - if (this.metrics != null) { - this.metrics.incrNsLookups(); - } - InetSocketAddress serverAddr = Address.toSocketAddress(remoteId.getAddress()); - if (serverAddr.isUnresolved()) { - if (this.metrics != null) { - this.metrics.incrNsLookupsFailed(); - } - throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); - } saslHandler = new NettyHBaseSaslRpcClientHandler(saslPromise, ticket, provider, token, - serverAddr.getAddress(), securityInfo, rpcClient.fallbackAllowed, this.rpcClient.conf); + ((InetSocketAddress) ch.remoteAddress()).getAddress(), securityInfo, + rpcClient.fallbackAllowed, this.rpcClient.conf); } catch (IOException e) { failInit(ch, e); return; @@ -282,16 +272,7 @@ public void operationComplete(Future future) throws Exception { private void connect() throws UnknownHostException { assert eventLoop.inEventLoop(); LOG.trace("Connecting to {}", remoteId.getAddress()); - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookups(); - } - InetSocketAddress remoteAddr = Address.toSocketAddress(remoteId.getAddress()); - if (remoteAddr.isUnresolved()) { - if (this.rpcClient.metrics != null) { - this.rpcClient.metrics.incrNsLookupsFailed(); - } - throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); - } + InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics); this.channel = new Bootstrap().group(eventLoop).channel(rpcClient.channelClass) .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay()) .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive) diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index 6749efe66f3e..b2c7eeae4a5a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -18,12 +18,15 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; @@ -122,7 +125,7 @@ protected RpcConnection(Configuration conf, HashedWheelTimer timeoutTimer, Conne this.remoteId = remoteId; } - protected void scheduleTimeoutTask(final Call call) { + protected final void scheduleTimeoutTask(final Call call) { if (call.timeout > 0) { call.timeoutTask = timeoutTimer.newTimeout(new TimerTask() { @@ -137,7 +140,7 @@ public void run(Timeout timeout) throws Exception { } } - protected byte[] getConnectionHeaderPreamble() { + protected final byte[] getConnectionHeaderPreamble() { // Assemble the preamble up in a buffer first and then send it. Writing individual elements, // they are getting sent across piecemeal according to wireshark and then server is messing // up the reading on occasion (the passed in stream is not buffered yet). @@ -153,7 +156,7 @@ protected byte[] getConnectionHeaderPreamble() { return preamble; } - protected ConnectionHeader getConnectionHeader() { + protected final ConnectionHeader getConnectionHeader() { final ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); builder.setServiceName(remoteId.getServiceName()); final UserInformation userInfoPB = provider.getUserInfo(remoteId.ticket); @@ -176,6 +179,21 @@ protected ConnectionHeader getConnectionHeader() { return builder.build(); } + protected final InetSocketAddress getRemoteInetAddress(MetricsConnection metrics) + throws UnknownHostException { + if (metrics != null) { + metrics.incrNsLookups(); + } + InetSocketAddress remoteAddr = Address.toSocketAddress(remoteId.getAddress()); + if (remoteAddr.isUnresolved()) { + if (metrics != null) { + metrics.incrNsLookupsFailed(); + } + throw new UnknownHostException(remoteId.getAddress() + " could not be resolved"); + } + return remoteAddr; + } + protected abstract void callTimeout(Call call); public ConnectionId remoteId() { From 956a4c3a57fa5ea6a9062422c21a1761c67979f8 Mon Sep 17 00:00:00 2001 From: SWH12 <34267571+SWH12@users.noreply.github.com> Date: Mon, 7 Dec 2020 22:12:56 +0800 Subject: [PATCH 566/769] HBASE-25366 [Documentation] Fix spelling error in sync_replication.adoc(#2744) Signed-off-by: Duo Zhang --- src/main/asciidoc/_chapters/sync_replication.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/sync_replication.adoc b/src/main/asciidoc/_chapters/sync_replication.adoc index d28b9a9731a3..82b3c7e21780 100644 --- a/src/main/asciidoc/_chapters/sync_replication.adoc +++ b/src/main/asciidoc/_chapters/sync_replication.adoc @@ -99,7 +99,7 @@ hbase> transit_peer_sync_replication_state '1', 'ACTIVE' Case.3 How to operate when 
active cluster crashed:: If the active cluster has been crashed (it may be not reachable now), so let's just transit the standby cluster to -DOWNGRANDE_ACTIVE state, and after that, we should redirect all the requests from client to the DOWNGRADE_ACTIVE cluster. +DOWNGRADE_ACTIVE state, and after that, we should redirect all the requests from client to the DOWNGRADE_ACTIVE cluster. [source,ruby] ---- From a15c1bea9552c0d61dc3ca18840b86261e2399e0 Mon Sep 17 00:00:00 2001 From: Pankaj Date: Mon, 7 Dec 2020 23:00:48 +0530 Subject: [PATCH 567/769] HBASE-25277 postScannerFilterRow impacts Scan performance a lot in HBase 2.x (#2675) * HBASE-25277 postScannerFilterRow impacts Scan performance a lot in HBase 2.x 1. Added a check for Object class in RegionCoprocessorHost to avoid wrong initialization of hasCustomPostScannerFilterRow 2. Removed dummy implementation of postScannerFilterRow from AccessController, VisibilityController & ConstraintProcessor (which are not required currently) Signed-off-by Ramkrishna S Vasudevan Signed-off-by Anoop Sam John Signed-off-by: Duo Zhang --- .../hbase/constraint/ConstraintProcessor.java | 18 ++---- .../regionserver/RegionCoprocessorHost.java | 17 ++++-- .../security/access/AccessController.java | 7 --- .../visibility/VisibilityController.java | 7 --- .../TestRegionCoprocessorHost.java | 57 +++++++++++++++++-- 5 files changed, 69 insertions(+), 37 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index 6aa5d977b678..b0a04c5044ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -22,20 +22,19 @@ import java.util.List; import java.util.Optional; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.yetus.audience.InterfaceAudience; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /*** * Processes multiple {@link Constraint Constraints} on a given table. @@ -98,11 +97,4 @@ public void prePut(ObserverContext e, Put put, } // if we made it here, then the Put is valid } - - @Override - public boolean postScannerFilterRow(final ObserverContext e, - final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { - // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells. 
- return hasMore; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 5ebf7e1c1590..7ed23f695ecd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -79,7 +79,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.org.apache.commons.collections4.map.AbstractReferenceMap; @@ -102,6 +101,13 @@ public class RegionCoprocessorHost // optimization: no need to call postScannerFilterRow, if no coprocessor implements it private final boolean hasCustomPostScannerFilterRow; + /* + * Whether any configured CPs override postScannerFilterRow hook + */ + public boolean hasCustomPostScannerFilterRow() { + return hasCustomPostScannerFilterRow; + } + /** * * Encapsulation of the environment of each coprocessor @@ -275,11 +281,10 @@ public RegionCoprocessorHost(final HRegion region, out: for (RegionCoprocessorEnvironment env: coprocEnvironments) { if (env.getInstance() instanceof RegionObserver) { Class clazz = env.getInstance().getClass(); - for(;;) { - if (clazz == null) { - // we must have directly implemented RegionObserver - hasCustomPostScannerFilterRow = true; - break out; + for (;;) { + if (clazz == Object.class) { + // we dont need to look postScannerFilterRow into Object class + break; // break the inner loop } try { clazz.getDeclaredMethod("postScannerFilterRow", ObserverContext.class, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 3a6c3aae657b..75bc73ccdcd8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1848,13 +1848,6 @@ public void postScannerClose(final ObserverContext scannerOwners.remove(s); } - @Override - public boolean postScannerFilterRow(final ObserverContext e, - final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { - // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells. - return hasMore; - } - /** * Verify, when servicing an RPC, that the caller is the scanner owner. 
   * If so, we assume that access control is correctly enforced based on
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index 37f25a83ea72..7c4b7abb8bff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -678,13 +678,6 @@ private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOExc
     return PrivateCellUtil.createCell(newCell, tags);
   }

-  @Override
-  public boolean postScannerFilterRow(final ObserverContext e,
-    final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException {
-    // 'default' in RegionObserver might do unnecessary copy for Off heap backed Cells.
-    return hasMore;
-  }
-
   /****************************** VisibilityEndpoint service related methods ******************************/
   @Override
   public synchronized void addLabels(RpcController controller, VisibilityLabelsRequest request,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java
index 423a412f75c1..b0188d9b7ce5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java
@@ -22,11 +22,14 @@
 import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR;
 import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

 import java.io.IOException;
+import java.util.Optional;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -58,7 +61,6 @@
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import java.io.IOException;

 @Category({SmallTests.class})
 public class TestRegionCoprocessorHost {
@@ -79,19 +81,36 @@ public class TestRegionCoprocessorHost {

   @Before
   public void setup() throws IOException {
+    init(null);
+  }
+
+  private void init(Boolean flag) throws IOException {
     conf = HBaseConfiguration.create();
     conf.setBoolean(COPROCESSORS_ENABLED_CONF_KEY, true);
     conf.setBoolean(USER_COPROCESSORS_ENABLED_CONF_KEY, true);
     TableName tableName = TableName.valueOf(name.getMethodName());
     regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
-    // config a same coprocessor with system coprocessor
-    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
-      .setCoprocessor(SimpleRegionObserver.class.getName()).build();
+    TableDescriptor tableDesc = null;
+    if (flag == null) {
+      // configure a coprocessor which overrides postScannerFilterRow
+      tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+        .setCoprocessor(SimpleRegionObserver.class.getName()).build();
+    } else if (flag) {
+      // configure a coprocessor which doesn't override postScannerFilterRow
+      tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+        .setCoprocessor(TempRegionObserver.class.getName()).build();
+    } else {
+      // configure two coprocessors; one doesn't override postScannerFilterRow but the other does
+      conf.set(REGION_COPROCESSOR_CONF_KEY, TempRegionObserver.class.getName());
+      tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+        .setCoprocessor(SimpleRegionObserver.class.getName()).build();
+    }
     region = mock(HRegion.class);
     when(region.getRegionInfo()).thenReturn(regionInfo);
     when(region.getTableDescriptor()).thenReturn(tableDesc);
     rsServices = mock(RegionServerServices.class);
   }
+
   @Test
   public void testLoadDuplicateCoprocessor() throws Exception {
     conf.setBoolean(SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR, true);
@@ -158,6 +177,27 @@ public void testPreMemStoreCompactionCompactScannerOpen() throws IOException {
     verifyScanInfo(newScanInfo);
   }

+  @Test
+  public void testPostScannerFilterRow() throws IOException {
+    // By default SimpleRegionObserver is set as region coprocessor which implements
+    // postScannerFilterRow
+    RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
+    assertTrue("Region coprocessor implements postScannerFilterRow",
+      host.hasCustomPostScannerFilterRow());
+
+    // Set a region CP which doesn't implement postScannerFilterRow
+    init(true);
+    host = new RegionCoprocessorHost(region, rsServices, conf);
+    assertFalse("Region coprocessor implements postScannerFilterRow",
+      host.hasCustomPostScannerFilterRow());
+
+    // Set multiple region CPs, in which one implements postScannerFilterRow
+    init(false);
+    host = new RegionCoprocessorHost(region, rsServices, conf);
+    assertTrue("Region coprocessor doesn't implement postScannerFilterRow",
+      host.hasCustomPostScannerFilterRow());
+  }
+
   private void verifyScanInfo(ScanInfo newScanInfo) {
     assertEquals(KeepDeletedCells.TRUE, newScanInfo.getKeepDeletedCells());
     assertEquals(MAX_VERSIONS, newScanInfo.getMaxVersions());
@@ -175,4 +215,13 @@ private ScanInfo getScanInfo() {
       CellComparator.getInstance(), true);
   }

+  /*
+   * Simple region coprocessor which doesn't override postScannerFilterRow
+   */
+  public static class TempRegionObserver implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional getRegionObserver() {
+      return Optional.of(this);
+    }
+  }
 }

From 22d3c9dd12fc47d626546105c8f4247bfa8387e5 Mon Sep 17 00:00:00 2001
From: niuyulin
Date: Tue, 8 Dec 2020 10:41:25 +0800
Subject: [PATCH 568/769] Add niuyulin as committer

---
 pom.xml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pom.xml b/pom.xml
index 05fde4f5453d..deab43804362 100755
--- a/pom.xml
+++ b/pom.xml
@@ -471,6 +471,12 @@
       ndimiduk@apache.org
       -8
     
+    
+      niuyulin
+      Yulin Niu
+      niuyulin@apache.org
+      +8
+    
     
       nkeywal
       Nicolas Liochon

From 8c752300bdd863a94dbf0aa8e4852902d29d26c8 Mon Sep 17 00:00:00 2001
From: XinSun
Date: Tue, 8 Dec 2020 10:49:39 +0800
Subject: [PATCH 569/769] Add Xin Sun as a developer

---
 pom.xml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pom.xml b/pom.xml
index deab43804362..fe53e353b68f 100755
--- a/pom.xml
+++ b/pom.xml
@@ -686,6 +686,12 @@
       wangzheng@apache.org
       +8
     
+    
+      sunxin
+      Xin Sun
+      sunxin@apache.org
+      +8
+    
     

From 038a7bafe446766d8f00942f8c267888ebd4c40c Mon Sep 17 00:00:00 2001
From: shahrs87
Date: Tue, 8 Dec 2020 17:58:00 +0530
Subject: [PATCH 570/769] HBASE-25328: Add builder method to create Tags

Closes #2707

Signed-off-by: Anoop Sam John
Signed-off-by: Geoffrey Jacoby
Signed-off-by: Viraj Jasani
---
 .../java/org/apache/hadoop/hbase/RawCell.java |  8 ++
 .../org/apache/hadoop/hbase/TagBuilder.java   | 50 ++++++++++++
 .../hadoop/hbase/TagBuilderFactory.java       | 73 +++++++++++++++++
 .../apache/hadoop/hbase/TestTagBuilder.java   | 78 +++++++++++++++++++
 4 files changed, 209 insertions(+)
 create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java
 create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java
 create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/TestTagBuilder.java

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
index ea598d21ca3b..85f8b278de47 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase;

 import java.util.Iterator;
+import java.util.List;
 import java.util.Optional;

 import org.apache.yetus.audience.InterfaceAudience;
@@ -64,4 +65,11 @@ public static void checkForTagsLength(int tagsLength) {
       throw new IllegalArgumentException("tagslength " + tagsLength + " > " + MAX_TAGS_LENGTH);
     }
   }
+
+  /**
+   * @return A new cell with the extra tags added to it.
+   */
+  public static Cell createCell(Cell cell, List tags) {
+    return PrivateCellUtil.createCell(cell, tags);
+  }
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java
new file mode 100644
index 000000000000..372144c6c268
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilder.java
@@ -0,0 +1,50 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.nio.ByteBuffer;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Builder to create {@link Tag}.
+ * Call the setTagValue(byte[]) method to create an {@link ArrayBackedTag}.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+public interface TagBuilder {
+  /**
+   * Set type of the tag.
+   * @param tagType type of the tag
+   * @return {@link TagBuilder}
+   */
+  TagBuilder setTagType(byte tagType);
+
+  /**
+   * Set the value of the tag.
+   * @param tagBytes tag bytes.
+   * @return {@link TagBuilder}
+   */
+  TagBuilder setTagValue(byte[] tagBytes);
+
+  /**
+   * Build the tag.
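+   * For example (usage sketch): {@code TagBuilderFactory.create().setTagType(type).setTagValue(value).build()}.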
+ * @return {@link Tag} + */ + Tag build(); +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java new file mode 100644 index 000000000000..40744f91abf0 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagBuilderFactory.java @@ -0,0 +1,73 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.nio.ByteBuffer; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Factory to create Tags. + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +public final class TagBuilderFactory { + + public static TagBuilder create() { + return new TagBuilderImpl(); + } +} + +/** + * Builder implementation to create {@link Tag}
+ * Call the setTagValue(byte[]) method to create an {@link ArrayBackedTag}.
+ */
+class TagBuilderImpl implements TagBuilder {
+  // This assumes that we never create a tag with a type value less than 0.
+  private byte tagType = (byte)-1;
+  private byte[] tagBytes = null;
+  public static final String TAG_TYPE_NOT_SET_EXCEPTION = "Need to set type of the tag.";
+  public static final String TAG_VALUE_NULL_EXCEPTION = "TagBytes can't be null";
+
+  @Override
+  public TagBuilder setTagType(byte tagType) {
+    this.tagType = tagType;
+    return this;
+  }
+
+  @Override
+  public TagBuilder setTagValue(byte[] tagBytes) {
+    this.tagBytes = tagBytes;
+    return this;
+  }
+
+  private void validate() {
+    if (tagType == -1) {
+      throw new IllegalArgumentException(TAG_TYPE_NOT_SET_EXCEPTION);
+    }
+    if (tagBytes == null) {
+      throw new IllegalArgumentException(TAG_VALUE_NULL_EXCEPTION);
+    }
+  }
+
+  @Override
+  public Tag build() {
+    validate();
+    return new ArrayBackedTag(tagType, tagBytes);
+  }
+}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTagBuilder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTagBuilder.java
new file mode 100644
index 000000000000..b50aa2df645b
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTagBuilder.java
@@ -0,0 +1,78 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.nio.ByteBuffer;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MiscTests.class, SmallTests.class})
+public class TestTagBuilder {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestTagBuilder.class);
+
+  @Test
+  public void testArrayBackedTagBuilder() {
+    byte type = (byte)50;
+    String value = "Array-Backed-Tag";
+    TagBuilder builder = TagBuilderFactory.create();
+    assertTrue(builder instanceof TagBuilderImpl);
+    builder.setTagType(type);
+    builder.setTagValue(Bytes.toBytes(value));
+    Tag tag = builder.build();
+    assertEquals(value, Tag.getValueAsString(tag));
+    assertEquals(type, tag.getType());
+  }
+
+  @Test
+  public void testErrorMessages() {
+    String arrayValue = "Array-Backed-Tag";
+    TagBuilder builder = TagBuilderFactory.create();
+    builder.setTagValue(Bytes.toBytes(arrayValue));
+    try {
+      // Don't set the type for the tag.
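+      // build() is expected to throw IllegalArgumentException here because no tag type was set.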
+      builder.build();
+      fail("Shouldn't have come here.");
+    } catch(IllegalArgumentException iae) {
+      assertTrue(iae.getMessage().contains(TagBuilderImpl.TAG_TYPE_NOT_SET_EXCEPTION));
+    }
+
+    byte type = (byte)50;
+    builder = TagBuilderFactory.create();
+    builder.setTagType(type);
+    try {
+      // Need to call setTagValue(byte[]) to set the value.
+      builder.build();
+      fail("Shouldn't have come here.");
+    } catch(IllegalArgumentException iae) {
+      assertTrue(iae.getMessage().contains(TagBuilderImpl.TAG_VALUE_NULL_EXCEPTION));
+    }
+  }
+}

From 14af0072668202655b2059f587c735a8cfc2d5af Mon Sep 17 00:00:00 2001
From: haxiaolin
Date: Tue, 8 Dec 2020 17:21:16 +0800
Subject: [PATCH 571/769] HBASE-25363 Improve performance of HFileLinkCleaner
 by using ReadWriteLock instead of synchronized

---
 .../master/cleaner/HFileLinkCleaner.java      | 95 +++++++++++--------
 1 file changed, 55 insertions(+), 40 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
index a99c784d2ac8..b19e174be0c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.cleaner;

 import java.io.IOException;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -44,63 +45,75 @@ public class HFileLinkCleaner extends BaseHFileCleanerDelegate {
   private static final Logger LOG = LoggerFactory.getLogger(HFileLinkCleaner.class);

   private FileSystem fs = null;
+  private ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

   @Override
-  public synchronized boolean isFileDeletable(FileStatus fStat) {
-    if (this.fs == null) return false;
-    Path filePath = fStat.getPath();
-    // HFile Link is always deletable
-    if (HFileLink.isHFileLink(filePath)) return true;
+  public boolean isFileDeletable(FileStatus fStat) {
+    lock.readLock().lock();
+    try {
+      if (this.fs == null) {
+        return false;
+      }
+      Path filePath = fStat.getPath();
+      // HFile Link is always deletable
+      if (HFileLink.isHFileLink(filePath)) {
+        return true;
+      }

-    // If the file is inside a link references directory, means that it is a back ref link.
-    // The back ref can be deleted only if the referenced file doesn't exists.
-    Path parentDir = filePath.getParent();
-    if (HFileLink.isBackReferencesDir(parentDir)) {
-      Path hfilePath = null;
-      try {
-        // Also check if the HFile is in the HBASE_TEMP_DIRECTORY; this is where the referenced
-        // file gets created when cloning a snapshot.
-        hfilePath = HFileLink.getHFileFromBackReference(
-          new Path(CommonFSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY), filePath);
-        if (fs.exists(hfilePath)) {
-          return false;
-        }
-        // check whether the HFileLink still exists in mob dir.
-        hfilePath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(getConf()), filePath);
-        if (fs.exists(hfilePath)) {
+      // If the file is inside a link references directory, it means that it is a back ref link.
+      // The back ref can be deleted only if the referenced file doesn't exist.
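+      // The referenced file may live under the temp dir, the mob dir or the root dir;
+      // each of those locations is checked in turn below.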
+      Path parentDir = filePath.getParent();
+      if (HFileLink.isBackReferencesDir(parentDir)) {
+        Path hfilePath = null;
+        try {
+          // Also check if the HFile is in the HBASE_TEMP_DIRECTORY; this is where the referenced
+          // file gets created when cloning a snapshot.
+          hfilePath = HFileLink.getHFileFromBackReference(new Path(
+            CommonFSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY), filePath);
+          if (fs.exists(hfilePath)) {
+            return false;
+          }
+          // check whether the HFileLink still exists in mob dir.
+          hfilePath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(getConf()), filePath);
+          if (fs.exists(hfilePath)) {
+            return false;
+          }
+          hfilePath = HFileLink.getHFileFromBackReference(CommonFSUtils.getRootDir(getConf()),
+            filePath);
+          return !fs.exists(hfilePath);
+        } catch (IOException e) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Couldn't verify if the referenced file still exists, keep it just in case: "
+              + hfilePath);
+          }
          return false;
        }
-    }
-
-    // HFile is deletable only if has no links
-    Path backRefDir = null;
-    try {
-      backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName());
-      return CommonFSUtils.listStatus(fs, backRefDir) == null;
+      }
+
+      // HFile is deletable only if it has no links
+      Path backRefDir = null;
+      try {
+        backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName());
+        return CommonFSUtils.listStatus(fs, backRefDir) == null;
       } catch (IOException e) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Couldn't get the references, not deleting file, just in case. filePath="
-            + filePath + ", backRefDir=" + backRefDir);
+          LOG.debug(
+            "Couldn't get the references, not deleting file, just in case. filePath="
+              + filePath + ", backRefDir=" + backRefDir);
         }
         return false;
       }
+    } finally {
+      lock.readLock().unlock();
     }
   }

   @Override
-  public synchronized void setConf(Configuration conf) {
+  public void setConf(Configuration conf) {
     super.setConf(conf);
     // setup filesystem
+    lock.writeLock().lock();
     try {
       this.fs = FileSystem.get(this.getConf());
     } catch (IOException e) {
@@ -109,6 +122,8 @@ public void setConf(Configuration conf) {
           + FileSystem.FS_DEFAULT_NAME_KEY + "=" + getConf().get(FileSystem.FS_DEFAULT_NAME_KEY,
             FileSystem.DEFAULT_FS));
     }
+    } finally {
+      lock.writeLock().unlock();
+    }
   }
 }

From fa8fb2dd7024663a225bd7d5cc8161ed8d746722 Mon Sep 17 00:00:00 2001
From: z-york
Date: Tue, 8 Dec 2020 23:34:18 -0800
Subject: [PATCH 572/769] HBASE-25362 Fix quoting in hbase-vote to avoid voting
 build failures (#2737)

Signed-off-by: Nick Dimiduk
Signed-off-by: Stephen Wu
---
 dev-support/hbase-vote.sh | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/dev-support/hbase-vote.sh b/dev-support/hbase-vote.sh
index d608f1e5e4a4..11267757b253 100755
--- a/dev-support/hbase-vote.sh
+++ b/dev-support/hbase-vote.sh
@@ -40,10 +40,13 @@ Usage: ${SCRIPT} -s | --source [-k | --key ] [-f | --keys-file-
                              https://downloads.apache.org/hbase/KEYS
    -o | --output-dir ''      directory which has the stdout and stderr of each verification target
    -P |                      list of maven profiles to activate for test UT/IT, i.e.
<-P runSmallTests> Defaults to runAllTests - -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0> Defaults to unset + -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0 -D skipTests> Defaults to unset __EOF } +MVN_PROFILES=() +MVN_PROPERTIES=() + while ((${#})); do case "${1}" in -h | --help ) @@ -57,9 +60,9 @@ while ((${#})); do -o | --output-dir ) OUTPUT_DIR="${2}"; shift 2 ;; -P ) - MVN_PROFILES="-P ${2}"; shift 2 ;; + MVN_PROFILES+=("-P ${2}"); shift 2 ;; -D ) - MVN_PROPERTIES="-D ${2}"; shift 2 ;; + MVN_PROPERTIES+=("-D ${2}"); shift 2 ;; * ) usage >&2; exit 1 ;; esac @@ -92,8 +95,8 @@ if [ ! -d "${OUTPUT_DIR}" ]; then fi # Maven profile must be provided -if [ -z "${MVN_PROFILES}" ]; then - MVN_PROFILES="-P runAllTests" +if [ ${#MVN_PROFILES[@]} -eq 0 ]; then + MVN_PROFILES=("-P runAllTests") fi OUTPUT_PATH_PREFIX="${OUTPUT_DIR}"/"${HBASE_RC_VERSION}" @@ -145,17 +148,18 @@ function unzip_from_source() { function rat_test() { rm -f "${OUTPUT_PATH_PREFIX}"_rat_test - mvn clean apache-rat:check "${MVN_PROPERTIES}" 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 + mvn clean apache-rat:check "${MVN_PROPERTIES[@]}" 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_rat_test && RAT_CHECK_PASSED=1 } function build_from_source() { rm -f "${OUTPUT_PATH_PREFIX}"_build_from_source - mvn clean install "${MVN_PROPERTIES}" -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 + # Hardcode skipTests for faster build. Testing is covered later. + mvn clean install "${MVN_PROPERTIES[@]}" -DskipTests 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_build_from_source && BUILD_FROM_SOURCE_PASSED=1 } function run_tests() { rm -f "${OUTPUT_PATH_PREFIX}"_run_tests - mvn package "${MVN_PROFILES}" "${MVN_PROPERTIES}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 + mvn package "${MVN_PROFILES[@]}" "${MVN_PROPERTIES[@]}" -Dsurefire.rerunFailingTestsCount=3 2>&1 | tee "${OUTPUT_PATH_PREFIX}"_run_tests && UNIT_TEST_PASSED=1 } function execute() { @@ -167,11 +171,11 @@ function print_when_exit() { * Signature: $( ((SIGNATURE_PASSED)) && echo "ok" || echo "failed" ) * Checksum : $( ((CHECKSUM_PASSED)) && echo "ok" || echo "failed" ) * Rat check (${JAVA_VERSION}): $( ((RAT_CHECK_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean apache-rat:check "${MVN_PROPERTIES}" + - mvn clean apache-rat:check ${MVN_PROPERTIES[@]} * Built from source (${JAVA_VERSION}): $( ((BUILD_FROM_SOURCE_PASSED)) && echo "ok" || echo "failed" ) - - mvn clean install -DskipTests "${MVN_PROPERTIES}" + - mvn clean install ${MVN_PROPERTIES[@]} -DskipTests * Unit tests pass (${JAVA_VERSION}): $( ((UNIT_TEST_PASSED)) && echo "ok" || echo "failed" ) - - mvn package ${MVN_PROFILES} "${MVN_PROPERTIES}" -Dsurefire.rerunFailingTestsCount=3 + - mvn package ${MVN_PROFILES[@]} ${MVN_PROPERTIES[@]} -Dsurefire.rerunFailingTestsCount=3 __EOF if ((CHECKSUM_PASSED)) && ((SIGNATURE_PASSED)) && ((RAT_CHECK_PASSED)) && ((BUILD_FROM_SOURCE_PASSED)) && ((UNIT_TEST_PASSED)) ; then exit 0 From 9a1ecee33e92de5beb9ba9bcf5e70c9461e798c9 Mon Sep 17 00:00:00 2001 From: lixiaobao Date: Wed, 9 Dec 2020 21:34:36 +0800 Subject: [PATCH 573/769] HBASE-25372 Fix typo in ban-jersey section of the enforcer plugin in pom.xml (#2749) Signed-off-by: Wei-Chiu Chuang Signed-off-by: Duo Zhang --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index fe53e353b68f..feeeb7f7f49b 
100755
--- a/pom.xml
+++ b/pom.xml
@@ -1200,8 +1200,8 @@

-                      >org.glassfish.jersey.containers:**
-                      >org.glassfish.jersey.core:**
+                      org.glassfish.jersey.containers:**
+                      org.glassfish.jersey.core:**
                       Use shaded jersey instead

From fe86ce59579e5d7c36a95654ea7aabda2f10271e Mon Sep 17 00:00:00 2001
From: Huaxiang Sun
Date: Mon, 30 Nov 2020 15:58:25 -0800
Subject: [PATCH 574/769] HBASE-25293 Followup jira to address the client
 handling issue when changing from meta replica to non-meta-replica at the
 server side.

---
 .../client/AsyncNonMetaRegionLocator.java     |   2 +-
 .../CatalogReplicaLoadBalanceSelector.java    |   2 +
 ...talogReplicaLoadBalanceSimpleSelector.java |  19 ++-
 ...talogReplicaLoadBalanceSimpleSelector.java | 132 ++++++++++++++++++
 4 files changed, 144 insertions(+), 11 deletions(-)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index 2c2520f8bd12..1c686aca8b76 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -211,7 +211,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture {
-        int numOfReplicas = 1;
+        int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
         try {
           RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get(
             conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java
index c3ce868757f1..27be88a9def2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java
@@ -28,6 +28,8 @@
 @InterfaceAudience.Private
 interface CatalogReplicaLoadBalanceSelector {

+  int UNINITIALIZED_NUM_OF_REPLICAS = -1;
+
   /**
    * This method is called when input location is stale, i.e, when clients run into
    * org.apache.hadoop.hbase.NotServingRegionException.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java
index bc8264050149..01996b34e2ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java
@@ -108,7 +108,6 @@ public String toString() {
   private final TableName tableName;
   private final IntSupplier getNumOfReplicas;
   private volatile boolean isStopped = false;
-  private final static int UNINITIALIZED_NUM_OF_REPLICAS = -1;

   CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn,
       IntSupplier getNumOfReplicas) {
@@ -117,7 +116,7 @@ public String toString() {
     this.getNumOfReplicas = getNumOfReplicas;

     // This numOfReplicas is going to be lazy initialized.
- this.numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS; + this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; // Start chores this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); @@ -146,7 +145,7 @@ public void onError(HRegionLocation loc) { */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; - if (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) { + if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } @@ -262,16 +261,16 @@ private void cleanupReplicaReplicaStaleCache() { private int refreshCatalogReplicaCount() { int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); LOG.debug("Refreshed replica count {}", newNumOfReplicas); - if (newNumOfReplicas == 1) { - LOG.warn("Table {}'s region replica count is 1, maybe a misconfiguration or failure to " - + "fetch the replica count", tableName); + // If the returned number of replicas is -1, it is caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. + if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + LOG.error("Failed to fetch Table {}'s region replica count", tableName); + return this.numOfReplicas; } - int cachedNumOfReplicas = this.numOfReplicas; - // If the returned number of replicas is 1, it is mostly caused by failure to fetch the - // replica count. Do not update the numOfReplicas in this case. + int cachedNumOfReplicas = this.numOfReplicas; if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - ((cachedNumOfReplicas != newNumOfReplicas) && (newNumOfReplicas != 1))) { + (cachedNumOfReplicas != newNumOfReplicas)) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java new file mode 100644 index 000000000000..6b14286f99ca --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; +import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ MediumTests.class, ClientTests.class }) +public class TestCatalogReplicaLoadBalanceSimpleSelector { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); + + private static final Logger LOG = LoggerFactory.getLogger( + TestCatalogReplicaLoadBalanceSimpleSelector.class); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final int NB_SERVERS = 4; + private static int numOfMetaReplica = NB_SERVERS - 1; + + private static AsyncConnectionImpl CONN; + + private static ConnectionRegistry registry; + private static Admin admin; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + + TEST_UTIL.startMiniCluster(NB_SERVERS); + admin = TEST_UTIL.getAdmin(); + admin.balancerSwitch(false, true); + + // Enable hbase:meta replication. + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( + TableName.META_TABLE_NAME).size() >= numOfMetaReplica); + + registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + CONN = new AsyncConnectionImpl(conf, registry, + registry.getClusterId().get(), null, User.getCurrent()); + } + + @AfterClass + public static void tearDown() throws Exception { + IOUtils.closeQuietly(CONN); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMetaChangeFromReplicaNoReplica() throws IOException, InterruptedException { + String replicaSelectorClass = CONN.getConfiguration(). 
+ get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, + CatalogReplicaLoadBalanceSimpleSelector.class.getName()); + + CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory + .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get + (CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + + assertNotEquals( + metaSelector.select(TableName.valueOf("test"), EMPTY_START_ROW, RegionLocateType.CURRENT), + RegionReplicaUtil.DEFAULT_REPLICA_ID); + + // Change to No meta replica + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 1); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( + TableName.META_TABLE_NAME).size() == 1); + + CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = + CatalogReplicaLoadBalanceSelectorFactory.createSelector( + replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get( + CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + assertEquals( + metaSelectorWithNoReplica.select(TableName.valueOf("test"), EMPTY_START_ROW, + RegionLocateType.CURRENT), RegionReplicaUtil.DEFAULT_REPLICA_ID); + } +} From 12f097bb3bb7b86cac82779b9c5eeda9f7c0936f Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 9 Dec 2020 12:18:08 -0800 Subject: [PATCH 575/769] HBASE-25376 [create-release] Fix double .asc (#2755) Signed-off-by: Andrew Purtell --- dev-support/create-release/release-util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index 4dffd672c2ae..f1f732a7727c 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -685,7 +685,7 @@ function maven_deploy { #inputs: maven_set_version "$RELEASE_VERSION" # Prepare for signing kick_gpg_agent - declare -a mvn_goals=(clean install) + declare -a mvn_goals=(clean) if ! is_dry_run; then mvn_goals=("${mvn_goals[@]}" deploy) fi From bbd305bd2dabba22f49a0f75fa411d925c8dbca4 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 9 Dec 2020 16:54:18 -0800 Subject: [PATCH 576/769] HBASE-25380 [create-release] Add timestamping to log output (#2758) Added logging of timestamp so we can tell where we are spending time. Added context to the README copied from head of entrance script. 
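For illustration, a timestamped log line now looks roughly like the
following (hypothetical output; the exact message depends on the step being run):

  2020-12-09T20:15:33Z Building hbase-rm image with tag hbase-rm:latest...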
Signed-off-by: Andrew Purtell
---
 dev-support/create-release/README.txt         | 32 ++++++++--
 .../create-release/do-release-docker.sh       | 30 +++++-----
 dev-support/create-release/release-build.sh   | 30 +++++-----
 dev-support/create-release/release-util.sh    | 59 ++++++++++---------
 4 files changed, 90 insertions(+), 61 deletions(-)

diff --git a/dev-support/create-release/README.txt b/dev-support/create-release/README.txt
index e696574507f9..aff562445e12 100644
--- a/dev-support/create-release/README.txt
+++ b/dev-support/create-release/README.txt
@@ -1,7 +1,31 @@
-Entrance script is _do-release-docker.sh_. Requires a local docker;
-for example, on mac os x, Docker for Desktop installed and running.
-
-For usage, pass '-h':
+Creates an HBase release candidate. The script will update versions, tag the branch,
+build HBase binary packages and documentation, and upload maven artifacts to a staging
+repository. There is also a dry run mode where only local builds are performed, and
+nothing is uploaded to the ASF repos.
+
+Run with "-h" for options. For example, running below will do all
+steps above using the 'rm' dir under Downloads as workspace:
+
+ $ ./do-release-docker.sh -d ~/Downloads/rm
+
+The scripts in this directory came originally from spark
+(https://github.com/apache/spark/tree/master/dev/create-release). They were then
+modified to suit the hbase context. These scripts supersede the old
+../make_rc.sh script for making release candidates because what is here is more
+comprehensive, doing more steps of the RM process as well as running in a
+container so the RM build environment can be a constant.
+
+It:
+
+ * Tags release
+ * Sets version to the release version
+ * Sets version to next SNAPSHOT version.
+ * Builds, signs, and hashes all artifacts.
+ * Pushes release tgzs to the dev dir in an apache dist.
+ * Pushes to repository.apache.org staging.
+
+The entry point is here, in the do-release-docker.sh script. Requires a local
+docker; for example, on mac os x, Docker for Desktop installed and running.

 $ ./do-release-docker.sh -h

diff --git a/dev-support/create-release/do-release-docker.sh b/dev-support/create-release/do-release-docker.sh
index b48581f9165b..cda814cfbf1b 100755
--- a/dev-support/create-release/do-release-docker.sh
+++ b/dev-support/create-release/do-release-docker.sh
@@ -76,7 +76,7 @@ Options:
   -s [step]  runs a single step of the process; valid steps are: tag|publish-dist|publish-release.
              If none specified, runs tag, then publish-dist, and then publish-release.
              'publish-snapshot' is also an allowed, less used, option.
-  -x         debug. do less clean up. (env file, gpg forwarding on mac)
+  -x         debug. Does less clean up (env file, gpg forwarding on mac)
EOF
  exit 1
}
@@ -147,7 +147,7 @@ done

# We need to import that public key in the container in order to use the private key via the agent.
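# The exported key file lands in the shared work dir so the container can read and import it.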
GPG_KEY_FILE="$WORKDIR/gpg.key.public" -echo "Exporting public key for ${GPG_KEY}" +log "Exporting public key for ${GPG_KEY}" fcreate_secure "$GPG_KEY_FILE" $GPG "${GPG_ARGS[@]}" --export "${GPG_KEY}" > "${GPG_KEY_FILE}" @@ -155,10 +155,10 @@ function cleanup { local id banner "Release Cleanup" if is_debug; then - echo "skipping due to debug run" + log "skipping due to debug run" return 0 fi - echo "details in cleanup.log" + log "details in cleanup.log" if [ -f "${ENVFILE}" ]; then rm -f "$ENVFILE" fi @@ -186,7 +186,7 @@ function cleanup { trap cleanup EXIT -echo "Host OS: ${HOST_OS}" +log "Host OS: ${HOST_OS}" if [ "${HOST_OS}" == "DARWIN" ]; then run_silent "Building gpg-agent-proxy image with tag ${IMGTAG}..." "docker-proxy-build.log" \ docker build --build-arg "UID=${UID}" --build-arg "RM_USER=${USER}" \ @@ -198,7 +198,7 @@ run_silent "Building hbase-rm image with tag $IMGTAG..." "docker-build.log" \ --build-arg "RM_USER=${USER}" "$SELF/hbase-rm" banner "Final prep for container launch." -echo "Writing out environment for container." +log "Writing out environment for container." # Write the release information to a file with environment variables to be used when running the # image. ENVFILE="$WORKDIR/env.list" @@ -244,7 +244,7 @@ if [ -n "${GIT_REPO}" ]; then ;; # on the host but normally git wouldn't use the local optimization file://*) - echo "[INFO] converted file:// git repo to a local path, which changes git to assume --local." + log "Converted file:// git repo to a local path, which changes git to assume --local." GIT_REPO_MOUNT=(--mount "type=bind,src=${GIT_REPO#file://},dst=/opt/hbase-repo,consistency=delegated") echo "HOST_GIT_REPO=${GIT_REPO}" >> "${ENVFILE}" GIT_REPO="/opt/hbase-repo" @@ -286,8 +286,8 @@ fi GPG_PROXY_MOUNT=() if [ "${HOST_OS}" == "DARWIN" ]; then GPG_PROXY_MOUNT=(--mount "type=volume,src=gpgagent,dst=/home/${USER}/.gnupg/") - echo "Setting up GPG agent proxy container needed on OS X." - echo " we should clean this up for you. If that fails the container ID is below and in " \ + log "Setting up GPG agent proxy container needed on OS X." + log " we should clean this up for you. If that fails the container ID is below and in " \ "gpg-proxy.cid" #TODO the key pair used should be configurable docker run --rm -p 62222:22 \ @@ -301,8 +301,8 @@ if [ "${HOST_OS}" == "DARWIN" ]; then sort "${HOME}/.ssh/known_hosts" | comm -1 -3 - "${WORKDIR}/gpg-agent-proxy.ssh-keyscan" \ > "${WORKDIR}/gpg-agent-proxy.known_hosts" if [ -s "${WORKDIR}/gpg-agent-proxy.known_hosts" ]; then - echo "Your ssh known_hosts does not include the entries for the gpg-agent proxy container." - echo "The following entry(ies) are missing:" + log "Your ssh known_hosts does not include the entries for the gpg-agent proxy container." + log "The following entry(ies) are missing:" sed -e 's/^/ /' "${WORKDIR}/gpg-agent-proxy.known_hosts" read -r -p "Okay to add these entries to ${HOME}/.ssh/known_hosts? [y/n] " ANSWER if [ "$ANSWER" != "y" ]; then @@ -310,8 +310,8 @@ if [ "${HOST_OS}" == "DARWIN" ]; then fi cat "${WORKDIR}/gpg-agent-proxy.known_hosts" >> "${HOME}/.ssh/known_hosts" fi - echo "Launching ssh reverse tunnel from the container to gpg agent." - echo " we should clean this up for you. If that fails the PID is in gpg-proxy.ssh.pid" + log "Launching ssh reverse tunnel from the container to gpg agent." + log " we should clean this up for you. 
If that fails the PID is in gpg-proxy.ssh.pid" ssh -p 62222 -R "/home/${USER}/.gnupg/S.gpg-agent:$(gpgconf --list-dir agent-extra-socket)" \ -i "${HOME}/.ssh/id_rsa" -N -n localhost >gpg-proxy.ssh.log 2>&1 & echo $! > "${WORKDIR}/gpg-proxy.ssh.pid" @@ -326,10 +326,10 @@ else fi banner "Building $RELEASE_TAG; output will be at $WORKDIR/output" -echo "We should clean the container up when we are done. If that fails then the container ID " \ +log "We should clean the container up when we are done. If that fails then the container ID " \ "is in release.cid" echo -# Where possible we specifcy "consistency=delegated" when we do not need host access during the +# Where possible we specify "consistency=delegated" when we do not need host access during the # build run. On Mac OS X specifically this gets us a big perf improvement. cmd=(docker run --rm -ti \ --env-file "$ENVFILE" \ diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index db28f6f08b42..44a594fff3d6 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -81,7 +81,7 @@ set -e function cleanup { # If REPO was set, then leave things be. Otherwise if we defined a repo clean it out. if [[ -z "${REPO}" ]] && [[ -n "${MAVEN_LOCAL_REPO}" ]]; then - echo "Cleaning up temp repo in '${MAVEN_LOCAL_REPO}'. Set REPO to reuse downloads." >&2 + log "Cleaning up temp repo in '${MAVEN_LOCAL_REPO}'. Set REPO to reuse downloads." >&2 rm -f "${MAVEN_SETTINGS_FILE}" &> /dev/null || true rm -rf "${MAVEN_LOCAL_REPO}" &> /dev/null || true fi @@ -142,7 +142,7 @@ if [[ "$1" == "tag" ]]; then git add RELEASENOTES.md CHANGES.md git commit -a -m "Preparing ${PROJECT} release $RELEASE_TAG; tagging and updates to CHANGES.md and RELEASENOTES.md" - echo "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" + log "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" git tag "$RELEASE_TAG" # Create next version @@ -159,7 +159,7 @@ if [[ "$1" == "tag" ]]; then else cd .. mv "${PROJECT}" "${PROJECT}.tag" - echo "Dry run: Clone with version changes and tag available as ${PROJECT}.tag in the output directory." + log "Dry run: Clone with version changes and tag available as ${PROJECT}.tag in the output directory." fi exit 0 fi @@ -186,7 +186,7 @@ fi cd "${PROJECT}" git checkout "$GIT_REF" git_hash="$(git rev-parse --short HEAD)" -echo "Checked out ${PROJECT} at ${GIT_REF} commit $git_hash" +log "Checked out ${PROJECT} at ${GIT_REF} commit $git_hash" if [ -z "${RELEASE_VERSION}" ]; then RELEASE_VERSION="$(maven_get_version)" @@ -210,7 +210,7 @@ cd .. if [[ "$1" == "publish-dist" ]]; then # Source and binary tarballs - echo "Packaging release source tarballs" + log "Packaging release source tarballs" make_src_release "${PROJECT}" "${RELEASE_VERSION}" # we do not have binary tarballs for hbase-thirdparty @@ -228,7 +228,7 @@ if [[ "$1" == "publish-dist" ]]; then rm -rf "${svn_target:?}/${DEST_DIR_NAME}" mkdir -p "$svn_target/${DEST_DIR_NAME}" - echo "Copying release tarballs" + log "Copying release tarballs" cp "${PROJECT}"-*.tar.* "$svn_target/${DEST_DIR_NAME}/" cp "${PROJECT}/CHANGES.md" "$svn_target/${DEST_DIR_NAME}/" cp "${PROJECT}/RELEASENOTES.md" "$svn_target/${DEST_DIR_NAME}/" @@ -241,6 +241,7 @@ if [[ "$1" == "publish-dist" ]]; then fi shopt -u nocasematch + log "svn add" svn add "$svn_target/${DEST_DIR_NAME}" if ! 
is_dry_run; then @@ -250,9 +251,10 @@ if [[ "$1" == "publish-dist" ]]; then rm -rf "$svn_target" else mv "$svn_target/${DEST_DIR_NAME}" "${svn_target}_${DEST_DIR_NAME}.dist" - echo "Dry run: svn-managed 'dist' directory with release tarballs, CHANGES.md and RELEASENOTES.md available as $(pwd)/${svn_target}_${DEST_DIR_NAME}.dist" + log "Dry run: svn-managed 'dist' directory with release tarballs, CHANGES.md and RELEASENOTES.md available as $(pwd)/${svn_target}_${DEST_DIR_NAME}.dist" rm -rf "$svn_target" fi + log "svn ci done" exit 0 fi @@ -261,13 +263,13 @@ if [[ "$1" == "publish-snapshot" ]]; then ( cd "${PROJECT}" mvn_log="${BASE_DIR}/mvn_deploy_snapshot.log" - echo "Publishing snapshot to nexus" + log "Publishing snapshot to nexus" maven_deploy snapshot "$mvn_log" if ! is_dry_run; then - echo "Snapshot artifacts successfully published to repo." + log "Snapshot artifacts successfully published to repo." rm "$mvn_log" else - echo "Dry run: Snapshot artifacts successfully built, but not published due to dry run." + log "Dry run: Snapshot artifacts successfully built, but not published due to dry run." fi ) exit $? @@ -277,16 +279,16 @@ if [[ "$1" == "publish-release" ]]; then ( cd "${PROJECT}" mvn_log="${BASE_DIR}/mvn_deploy_release.log" - echo "Staging release in nexus" + log "Staging release in nexus" maven_deploy release "$mvn_log" declare staged_repo_id="dryrun-no-repo" if ! is_dry_run; then staged_repo_id=$(grep -o "Closing staging repository with ID .*" "$mvn_log" \ | sed -e 's/Closing staging repository with ID "\([^"]*\)"./\1/') - echo "Release artifacts successfully published to repo ${staged_repo_id}" + log "Release artifacts successfully published to repo ${staged_repo_id}" rm "$mvn_log" else - echo "Dry run: Release artifacts successfully built, but not published due to dry run." + log "Dry run: Release artifacts successfully built, but not published due to dry run." fi # Dump out email to send. Where we find vote.tmpl depends # on where this script is run from @@ -300,5 +302,5 @@ fi set +x # done with detailed logging cd .. rm -rf "${PROJECT}" -echo "ERROR: expects to be called with 'tag', 'publish-dist', 'publish-release', or 'publish-snapshot'" >&2 +log "ERROR: expects to be called with 'tag', 'publish-dist', 'publish-release', or 'publish-snapshot'" >&2 exit_with_usage diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index f1f732a7727c..5f7224fded6b 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -29,7 +29,7 @@ PUBLISH_PROFILES=("-P" "apache-release,release") set -e function error { - echo "Error: $*" >&2 + log "Error: $*" >&2 exit 1 } @@ -54,10 +54,14 @@ function parse_version { function banner { local msg="$1" echo "========================" - echo "=== ${msg}" + log "${msg}" echo } +function log { + echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") ${1}" +} + # current number of seconds since epoch function get_ctime { date +"%s" @@ -71,17 +75,17 @@ function run_silent { local -i stop_time banner "${BANNER}" - echo "Command: $*" - echo "Log file: $LOG_FILE" + log "Command: $*" + log "Log file: $LOG_FILE" start_time="$(get_ctime)" if ! "$@" 1>"$LOG_FILE" 2>&1; then - echo "Command FAILED. Check full logs for details." + log "Command FAILED. Check full logs for details." 
tail "$LOG_FILE" exit 1 fi stop_time="$(get_ctime)" - echo "=== SUCCESS ($((stop_time - start_time)) seconds)" + log "SUCCESS ($((stop_time - start_time)) seconds)" } function fcreate_secure { @@ -147,7 +151,7 @@ function get_release_info { local version version="$(curl -s "$ASF_REPO_WEBUI;a=blob_plain;f=pom.xml;hb=refs/heads/$GIT_BRANCH" | parse_version)" - echo "Current branch VERSION is $version." + log "Current branch VERSION is $version." NEXT_VERSION="$version" RELEASE_VERSION="" @@ -199,7 +203,7 @@ function get_release_info { if git ls-remote --tags "$ASF_REPO" "$RELEASE_TAG" | grep -q "refs/tags/${RELEASE_TAG}$" ; then read -r -p "$RELEASE_TAG already exists. Continue anyway [y/n]? " ANSWER if [ "$ANSWER" != "y" ]; then - echo "Exiting." + log "Exiting." exit 1 fi SKIP_TAG=1 @@ -209,7 +213,7 @@ function get_release_info { GIT_REF="$RELEASE_TAG" if is_dry_run; then - echo "This is a dry run. If tag does not actually exist, please confirm the ref that will be built for testing." + log "This is a dry run. If tag does not actually exist, please confirm the ref that will be built for testing." GIT_REF="$(read_config "GIT_REF" "$GIT_REF")" fi export GIT_REF @@ -252,7 +256,7 @@ EOF read -r -p "Is this info correct [y/n]? " ANSWER if [ "$ANSWER" != "y" ]; then - echo "Exiting." + log "Exiting." exit 1 fi GPG_ARGS=("${GPG_ARGS[@]}" --local-user "${GPG_KEY}") @@ -279,7 +283,7 @@ function is_debug { function check_get_passwords { for env in "$@"; do if [ -z "${!env}" ]; then - echo "The environment variable $env is not set. Please enter the password or passphrase." + log "The environment variable $env is not set. Please enter the password or passphrase." echo # shellcheck disable=SC2229 stty -echo && printf "%s : " "$env" && read -r "$env" && printf '\n' && stty echo @@ -293,7 +297,7 @@ function check_needed_vars { local missing=0 for env in "$@"; do if [ -z "${!env}" ]; then - echo "$env must be set to run this script" + log "$env must be set to run this script" (( missing++ )) else # shellcheck disable=SC2163 @@ -322,7 +326,7 @@ function init_java { error "JAVA_HOME is not set." fi JAVA_VERSION=$("${JAVA_HOME}"/bin/javac -version 2>&1 | cut -d " " -f 2) - echo "java version: $JAVA_VERSION" + log "java version: $JAVA_VERSION" export JAVA_VERSION } @@ -330,7 +334,7 @@ function init_python { if ! [ -x "$(command -v python2)" ]; then error 'python2 needed by yetus. Install or add link? E.g: sudo ln -sf /usr/bin/python2.7 /usr/local/bin/python2' fi - echo "python version: $(python2 --version)" + log "python version: $(python2 --version)" } # Set MVN @@ -357,7 +361,7 @@ function init_yetus { fi # Work around yetus bug by asking test-patch for the version instead of rdm. YETUS_VERSION=$("${YETUS_HOME}/bin/test-patch" --version) - echo "Apache Yetus version ${YETUS_VERSION}" + log "Apache Yetus version ${YETUS_VERSION}" } function configure_maven { @@ -409,7 +413,7 @@ function git_clone_overwrite { if [[ -z "${GIT_REPO}" ]]; then asf_repo="gitbox.apache.org/repos/asf/${PROJECT}.git" - echo "[INFO] clone will be of the gitbox repo for ${PROJECT}." + log "Clone will be of the gitbox repo for ${PROJECT}." if [ -n "${ASF_USERNAME}" ] && [ -n "${ASF_PASSWORD}" ]; then # Ugly! encoded_username=$(python -c "import urllib; print urllib.quote('''$ASF_USERNAME''', '')") @@ -419,7 +423,7 @@ function git_clone_overwrite { GIT_REPO="https://${asf_repo}" fi else - echo "[INFO] clone will be of provided git repo." + log "Clone will be of provided git repo." fi # N.B. 
we use the shared flag because the clone is short lived and if a local repo repo was # given this will let us refer to objects there directly instead of hardlinks or copying. @@ -440,7 +444,7 @@ function start_step { if [ -z "${name}" ]; then name="${FUNCNAME[1]}" fi - echo "$(date -u +'%Y-%m-%dT%H:%M:%SZ') ${name} start" >&2 + log "${name} start" >&2 get_ctime } @@ -452,7 +456,7 @@ function stop_step { name="${FUNCNAME[1]}" fi stop_time="$(get_ctime)" - echo "$(date -u +'%Y-%m-%dT%H:%M:%SZ') ${name} stop ($((stop_time - start_time)) seconds)" + log "${name} stop ($((stop_time - start_time)) seconds)" } # Writes report into cwd! @@ -488,7 +492,7 @@ function get_jira_name { if [[ -z "$jira_name" ]]; then error "Sorry, can't determine the Jira name for project $project" fi - echo "$jira_name" + log "$jira_name" } # Update the CHANGES.md @@ -625,7 +629,7 @@ make_binary_release() { done else cd .. || exit - echo "No ${f_bin_prefix}*-bin.tar.gz product; expected?" + log "No ${f_bin_prefix}*-bin.tar.gz product; expected?" fi stop_step "${timing_token}" @@ -648,7 +652,7 @@ function kick_gpg_agent { # Do maven command to set version into local pom function maven_set_version { #input: local this_version="$1" - echo "${MVN[@]}" versions:set -DnewVersion="$this_version" + log "${MVN[@]}" versions:set -DnewVersion="$this_version" "${MVN[@]}" versions:set -DnewVersion="$this_version" | grep -v "no value" # silence logs } @@ -679,8 +683,8 @@ function maven_deploy { #inputs: fi # Publish ${PROJECT} to Maven repo # shellcheck disable=SC2154 - echo "Publishing ${PROJECT} checkout at '$GIT_REF' ($git_hash)" - echo "Publish version is $RELEASE_VERSION" + log "Publishing ${PROJECT} checkout at '$GIT_REF' ($git_hash)" + log "Publish version is $RELEASE_VERSION" # Coerce the requested version maven_set_version "$RELEASE_VERSION" # Prepare for signing @@ -689,9 +693,8 @@ function maven_deploy { #inputs: if ! is_dry_run; then mvn_goals=("${mvn_goals[@]}" deploy) fi - echo "${MVN[@]}" -DskipTests -Dcheckstyle.skip=true "${PUBLISH_PROFILES[@]}" \ - "${mvn_goals[@]}" - echo "Logging to ${mvn_log_file}. This will take a while..." + log "${MVN[@]}" -DskipTests -Dcheckstyle.skip=true "${PUBLISH_PROFILES[@]}" "${mvn_goals[@]}" + log "Logging to ${mvn_log_file}. This will take a while..." rm -f "$mvn_log_file" # The tortuous redirect in the next command allows mvn's stdout and stderr to go to mvn_log_file, # while also sending stderr back to the caller. @@ -700,7 +703,7 @@ function maven_deploy { #inputs: "${mvn_goals[@]}" 1>> "$mvn_log_file" 2> >( tee -a "$mvn_log_file" >&2 ); then error "Deploy build failed, for details see log at '$mvn_log_file'." fi - echo "BUILD SUCCESS." + log "BUILD SUCCESS." 
stop_step "${timing_token}" return 0 } From c2dc1fc3c5339ac5e72e3e78ab46983cd2316bd6 Mon Sep 17 00:00:00 2001 From: haxl Date: Thu, 10 Dec 2020 22:07:06 +0800 Subject: [PATCH 577/769] HBASE-25334 TestRSGroupsFallback.testFallback is flaky (#2728) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/master/ServerManager.java | 3 ++- .../org/apache/hadoop/hbase/master/TestDeadServer.java | 9 +++++++-- .../hadoop/hbase/rsgroup/TestRSGroupsFallback.java | 10 +++++++--- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 9e666c56a890..8977174edba7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -501,7 +501,8 @@ public DeadServer getDeadServers() { * @return true if any RS are being processed as dead, false if not */ public boolean areDeadServersInProgress() throws IOException { - return master.getProcedures().stream().anyMatch(p -> p instanceof ServerCrashProcedure); + return master.getProcedures().stream() + .anyMatch(p -> !p.isFinished() && p instanceof ServerCrashProcedure); } void letRegionServersShutdown() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java index 39163ab1de08..b0d6cb63ea76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java @@ -25,6 +25,7 @@ import java.util.Set; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; @@ -93,15 +94,19 @@ public static void tearDownAfterClass() throws Exception { } @Test - public void testCrashProcedureReplay() throws IOException { + public void testCrashProcedureReplay() throws Exception { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); final ProcedureExecutor pExecutor = master.getMasterProcedureExecutor(); ServerCrashProcedure proc = new ServerCrashProcedure( pExecutor.getEnvironment(), hostname123, false, false); + pExecutor.stop(); ProcedureTestingUtility.submitAndWait(pExecutor, proc); - assertTrue(master.getServerManager().areDeadServersInProgress()); + + ProcedureTestingUtility.restart(pExecutor); + ProcedureTestingUtility.waitProcedure(pExecutor, proc); + assertFalse(master.getServerManager().areDeadServersInProgress()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java index ea5e226b7f6b..478ffc654757 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java @@ -25,12 +25,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RSGroupTests; import org.apache.hadoop.hbase.util.Bytes; @@ -61,6 +63,7 @@ public class TestRSGroupsFallback extends TestRSGroupsBase { public static void setUp() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setBoolean(RSGroupBasedLoadBalancer.FALLBACK_GROUP_ENABLE_KEY, true); + conf.setInt(HConstants.HBASE_BALANCER_MAX_BALANCING, 0); setUpTestBeforeClass(); MASTER.balanceSwitch(true); } @@ -103,7 +106,7 @@ public void testFallback() throws Exception { // add a new server to default group, regions move to default group TEST_UTIL.getMiniHBaseCluster().startRegionServerAndWait(60000); - MASTER.balance(); + assertTrue(MASTER.balance()); assertRegionsInGroup(tableName, RSGroupInfo.DEFAULT_GROUP); // add a new server to test group, regions move back @@ -111,14 +114,15 @@ public void testFallback() throws Exception { TEST_UTIL.getMiniHBaseCluster().startRegionServerAndWait(60000); ADMIN.moveServersToRSGroup( Collections.singleton(t.getRegionServer().getServerName().getAddress()), groupName); - MASTER.balance(); + assertTrue(MASTER.balance()); assertRegionsInGroup(tableName, groupName); TEST_UTIL.deleteTable(tableName); } private void assertRegionsInGroup(TableName table, String group) throws IOException { - TEST_UTIL.waitUntilAllRegionsAssigned(table); + ProcedureTestingUtility.waitAllProcedures( + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); RSGroupInfo rsGroup = ADMIN.getRSGroup(group); MASTER.getAssignmentManager().getRegionStates().getRegionsOfTable(table).forEach(region -> { Address regionOnServer = MASTER.getAssignmentManager().getRegionStates() From 9596bce8f753f57e232e107c934ea12b59030785 Mon Sep 17 00:00:00 2001 From: haxl Date: Thu, 10 Dec 2020 22:15:39 +0800 Subject: [PATCH 578/769] HBASE-25287 Forgetting to unbuffer streams results in many CLOSE_WAIT sockets when loading files (#2699) Signed-off-by: Andrew Purtell Signed-off-by: Duo Zhang --- .../hadoop/hbase/io/hfile/HFileInfo.java | 52 +++++++++++-------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 330ef6fed003..5d65ff3b3a39 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -342,8 +342,8 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr Path path = context.getFilePath(); checkFileVersion(path); this.hfileContext = createHFileContext(path, trailer, conf); - } catch (Throwable t) { context.getInputStreamWrapper().unbuffer(); + } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper()); throw new CorruptHFileException("Problem reading HFile Trailer from file " + context.getFilePath(), t); @@ -355,28 +355,36 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr */ public void initMetaAndIndex(HFile.Reader reader) throws IOException { 
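// A condensed sketch of the pattern the hunk below applies (abbreviated, not the
// literal diff): perform every load-on-open read inside try; on success call
// unbuffer() on the input stream wrapper so the positional-read socket is
// released (preventing the CLOSE_WAIT pile-up this patch fixes); on failure
// close the wrapper outright before rethrowing:
//
//   try {
//     // ... read data index, meta index, file info, load-on-open blocks ...
//     context.getInputStreamWrapper().unbuffer();              // success path
//   } catch (Throwable t) {
//     IOUtils.closeQuietly(context.getInputStreamWrapper());   // failure path
//     throw new CorruptHFileException("...", t);
//   }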
ReaderContext context = reader.getContext(); - HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); - // Initialize an block iterator, and parse load-on-open blocks in the following. - blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), - context.getFileSize() - trailer.getTrailerSize()); - // Data index. We also read statistics about the block index written after - // the root level. - this.dataIndexReader = new HFileBlockIndex - .CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); - dataIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), - trailer.getDataIndexCount()); - reader.setDataBlockIndexReader(dataIndexReader); - // Meta index. - this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); - metaIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), + try { + HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); + // Initialize an block iterator, and parse load-on-open blocks in the following. + blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), + context.getFileSize() - trailer.getTrailerSize()); + // Data index. We also read statistics about the block index written after + // the root level. + this.dataIndexReader = + new HFileBlockIndex.CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); + dataIndexReader + .readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); + reader.setDataBlockIndexReader(dataIndexReader); + // Meta index. + this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); + metaIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getMetaIndexCount()); - reader.setMetaBlockIndexReader(metaIndexReader); - loadMetaInfo(blockIter, hfileContext); - reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this)); - // Load-On-Open info - HFileBlock b; - while ((b = blockIter.nextBlock()) != null) { - loadOnOpenBlocks.add(b); + reader.setMetaBlockIndexReader(metaIndexReader); + loadMetaInfo(blockIter, hfileContext); + reader.setDataBlockEncoder(HFileDataBlockEncoderImpl.createFromFileInfo(this)); + // Load-On-Open info + HFileBlock b; + while ((b = blockIter.nextBlock()) != null) { + loadOnOpenBlocks.add(b); + } + // close the block reader + context.getInputStreamWrapper().unbuffer(); + } catch (Throwable t) { + IOUtils.closeQuietly(context.getInputStreamWrapper()); + throw new CorruptHFileException("Problem reading data index and meta index from file " + + context.getFilePath(), t); } } From 9175215f922781134c8e9beb35e4290fc233dfac Mon Sep 17 00:00:00 2001 From: Adam <37170106+hsiangawang@users.noreply.github.com> Date: Thu, 10 Dec 2020 08:28:21 -0600 Subject: [PATCH 579/769] HBASE-25370 Fix flaky test TestClassFinder#testClassFinderDefaultsToOwnPackage (#2740) Signed-off-by: Duo Zhang --- hbase-common/pom.xml | 5 +++++ .../test/java/org/apache/hadoop/hbase/TestClassFinder.java | 7 ++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 48b3c0b7eded..8b9154156ba5 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -211,6 +211,11 @@ compile true + + org.hamcrest + hamcrest-library + test + org.mockito mockito-core diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java index b1c090322479..411bb65a95a5 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -295,7 +297,10 @@ public void testClassFinderDefaultsToOwnPackage() throws Exception { Set> pkgClasses = allClassesFinder.findClasses( ClassFinder.class.getPackage().getName(), false); Set> defaultClasses = allClassesFinder.findClasses(false); - assertArrayEquals(pkgClasses.toArray(), defaultClasses.toArray()); + Object[] pkgClassesArray = pkgClasses.toArray(); + Object[] defaultClassesArray = defaultClasses.toArray(); + assertEquals(pkgClassesArray.length, defaultClassesArray.length); + assertThat(pkgClassesArray, arrayContainingInAnyOrder(defaultClassesArray)); } private static class FileAndPath { From fba6d4cea92e98ce075db96bde42601ec4922093 Mon Sep 17 00:00:00 2001 From: Huaxiang Sun Date: Thu, 10 Dec 2020 10:12:53 -0800 Subject: [PATCH 580/769] Revert "HBASE-25293 Followup jira to address the client handling issue when chaning from meta replica to non-meta-replica at the server side." This reverts commit c1aa3b24e930e2c47ff4d7f6e286cb450458dffc. --- .../client/AsyncNonMetaRegionLocator.java | 2 +- .../CatalogReplicaLoadBalanceSelector.java | 2 - ...talogReplicaLoadBalanceSimpleSelector.java | 19 +-- ...talogReplicaLoadBalanceSimpleSelector.java | 132 ------------------ 4 files changed, 11 insertions(+), 144 deletions(-) delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 1c686aca8b76..2c2520f8bd12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -211,7 +211,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + int numOfReplicas = 1; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index 27be88a9def2..c3ce868757f1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -28,8 +28,6 @@ @InterfaceAudience.Private interface CatalogReplicaLoadBalanceSelector { - int UNINITIALIZED_NUM_OF_REPLICAS = -1; - /** * This method is called when input location is stale, i.e, when clients run into * org.apache.hadoop.hbase.NotServingRegionException. 
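For context on the constant removed above (and restored by patch 586 later in this series), a minimal self-contained sketch of the lazy-initialized sentinel it supports; the class and method names here are illustrative only, not HBase API:

import java.util.function.IntSupplier;

// Illustrative only: caches a replica count that is expensive to fetch, using -1
// as a "not yet initialized" sentinel, mirroring UNINITIALIZED_NUM_OF_REPLICAS.
final class ReplicaCountCache {
  static final int UNINITIALIZED = -1;
  private volatile int numOfReplicas = UNINITIALIZED;
  private final IntSupplier fetcher; // e.g. backed by meta region locations

  ReplicaCountCache(IntSupplier fetcher) {
    this.fetcher = fetcher;
  }

  int get() {
    int cached = numOfReplicas;
    if (cached == UNINITIALIZED) {      // first use: fetch and remember
      cached = fetcher.getAsInt();      // the supplier may itself report UNINITIALIZED
      if (cached != UNINITIALIZED) {    // cache only successful fetches
        numOfReplicas = cached;
      }
    }
    return cached;
  }
}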
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index 01996b34e2ef..bc8264050149 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -108,6 +108,7 @@ public String toString() { private final TableName tableName; private final IntSupplier getNumOfReplicas; private volatile boolean isStopped = false; + private final static int UNINITIALIZED_NUM_OF_REPLICAS = -1; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, IntSupplier getNumOfReplicas) { @@ -116,7 +117,7 @@ public String toString() { this.getNumOfReplicas = getNumOfReplicas; // This numOfReplicas is going to be lazy initialized. - this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + this.numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS; // Start chores this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); @@ -145,7 +146,7 @@ public void onError(HRegionLocation loc) { */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; - if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + if (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } @@ -261,16 +262,16 @@ private void cleanupReplicaReplicaStaleCache() { private int refreshCatalogReplicaCount() { int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); LOG.debug("Refreshed replica count {}", newNumOfReplicas); - // If the returned number of replicas is -1, it is caused by failure to fetch the - // replica count. Do not update the numOfReplicas in this case. - if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { - LOG.error("Failed to fetch Table {}'s region replica count", tableName); - return this.numOfReplicas; + if (newNumOfReplicas == 1) { + LOG.warn("Table {}'s region replica count is 1, maybe a misconfiguration or failure to " + + "fetch the replica count", tableName); } - int cachedNumOfReplicas = this.numOfReplicas; + + // If the returned number of replicas is 1, it is mostly caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - (cachedNumOfReplicas != newNumOfReplicas)) { + ((cachedNumOfReplicas != newNumOfReplicas) && (newNumOfReplicas != 1))) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java deleted file mode 100644 index 6b14286f99ca..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ /dev/null @@ -1,132 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Category({ MediumTests.class, ClientTests.class }) -public class TestCatalogReplicaLoadBalanceSimpleSelector { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); - - private static final Logger LOG = LoggerFactory.getLogger( - TestCatalogReplicaLoadBalanceSimpleSelector.class); - - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - - private static final int NB_SERVERS = 4; - private static int numOfMetaReplica = NB_SERVERS - 1; - - private static AsyncConnectionImpl CONN; - - private static ConnectionRegistry registry; - private static Admin admin; - - @BeforeClass - public static void setUp() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - - TEST_UTIL.startMiniCluster(NB_SERVERS); - admin = TEST_UTIL.getAdmin(); - admin.balancerSwitch(false, true); - - // Enable hbase:meta replication. - HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); - TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( - TableName.META_TABLE_NAME).size() >= numOfMetaReplica); - - registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); - CONN = new AsyncConnectionImpl(conf, registry, - registry.getClusterId().get(), null, User.getCurrent()); - } - - @AfterClass - public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testMetaChangeFromReplicaNoReplica() throws IOException, InterruptedException { - String replicaSelectorClass = CONN.getConfiguration(). 
- get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, - CatalogReplicaLoadBalanceSimpleSelector.class.getName()); - - CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory - .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get - (CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); - - assertNotEquals( - metaSelector.select(TableName.valueOf("test"), EMPTY_START_ROW, RegionLocateType.CURRENT), - RegionReplicaUtil.DEFAULT_REPLICA_ID); - - // Change to No meta replica - HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 1); - TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( - TableName.META_TABLE_NAME).size() == 1); - - CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = - CatalogReplicaLoadBalanceSelectorFactory.createSelector( - replicaSelectorClass, META_TABLE_NAME, CONN, () -> { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get( - CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); - assertEquals( - metaSelectorWithNoReplica.select(TableName.valueOf("test"), EMPTY_START_ROW, - RegionLocateType.CURRENT), RegionReplicaUtil.DEFAULT_REPLICA_ID); - } -} From a99c156b9af63623b3663189dcd60cc26f8fd937 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 11 Dec 2020 08:22:49 +0800 Subject: [PATCH 581/769] HBASE-25370 Addendum fix checkstyle issue and dependencies --- .../src/test/java/org/apache/hadoop/hbase/TestClassFinder.java | 1 - pom.xml | 2 -- 2 files changed, 3 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java index 411bb65a95a5..cf97e313cf0c 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java @@ -19,7 +19,6 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; diff --git a/pom.xml b/pom.xml index feeeb7f7f49b..f23d0db549b9 100755 --- a/pom.xml +++ b/pom.xml @@ -2286,13 +2286,11 @@ org.hamcrest hamcrest-core ${hamcrest.version} - test org.hamcrest hamcrest-library ${hamcrest.version} - test org.mockito From 59cc5af5c293ee56e8a02d96efccf4834b5991e1 Mon Sep 17 00:00:00 2001 From: Bo Cui Date: Sat, 12 Dec 2020 21:10:33 +0800 Subject: [PATCH 582/769] =?UTF-8?q?HBASE-23340=20hmaster=20/hbase/replicat?= =?UTF-8?q?ion/rs=20session=20expired=20(hbase=20repl=E2=80=A6=20(#2739)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/master/HMaster.java | 6 ++-- 
.../hbase/master/cleaner/LogCleaner.java | 5 ++-- .../master/ReplicationLogCleaner.java | 26 ++++++++++++----- .../hbase/master/cleaner/TestLogsCleaner.java | 28 ++++++++++++++----- .../region/TestMasterRegionWALCleaner.java | 2 +- 5 files changed, 47 insertions(+), 20 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 7d29ed668823..a61254f56101 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1336,17 +1336,17 @@ private void startServiceThreads() throws IOException { // Create cleaner thread pool cleanerPool = new DirScanPool(conf); + Map params = new HashMap<>(); + params.put(MASTER, this); // Start log cleaner thread int cleanerInterval = conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL); this.logCleaner = new LogCleaner(cleanerInterval, this, conf, - getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool); + getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), cleanerPool, params); getChoreService().scheduleChore(logCleaner); // start the hfile archive cleaner thread Path archiveDir = HFileArchiveUtil.getArchivePath(conf); - Map params = new HashMap<>(); - params.put(MASTER, this); this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(), archiveDir, cleanerPool, params); getChoreService().scheduleChore(hfileCleaner); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java index f65713ebf263..d8993b38ffef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; @@ -72,9 +73,9 @@ public class LogCleaner extends CleanerChore * @param pool the thread pool used to scan directories */ public LogCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs, - Path oldLogDir, DirScanPool pool) { + Path oldLogDir, DirScanPool pool, Map params) { super("LogsCleaner", period, stopper, conf, fs, oldLogDir, HBASE_MASTER_LOGCLEANER_PLUGINS, - pool); + pool, params); this.pendingDelete = new LinkedBlockingQueue<>(); int size = conf.getInt(OLD_WALS_CLEANER_THREAD_SIZE, DEFAULT_OLD_WALS_CLEANER_THREAD_SIZE); this.oldWALsCleaner = createOldWalsCleaner(size); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index 8f016bcb9124..a7821f1894a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -19,16 +19,19 @@ import java.io.IOException; import java.util.Collections; +import java.util.Map; import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import 
org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,7 +46,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ReplicationLogCleaner extends BaseLogCleanerDelegate { private static final Logger LOG = LoggerFactory.getLogger(ReplicationLogCleaner.class); - private ZKWatcher zkw; + private ZKWatcher zkw = null; + private boolean shareZK = false; private ReplicationQueueStorage queueStorage; private boolean stopped = false; private Set wals; @@ -92,12 +96,20 @@ public boolean apply(FileStatus file) { } @Override - public void setConf(Configuration config) { - // Make my own Configuration. Then I'll have my own connection to zk that - // I can close myself when comes time. - Configuration conf = new Configuration(config); + public void init(Map params) { + super.init(params); try { - setConf(conf, new ZKWatcher(conf, "replicationLogCleaner", null)); + if (MapUtils.isNotEmpty(params)) { + Object master = params.get(HMaster.MASTER); + if (master != null && master instanceof HMaster) { + zkw = ((HMaster) master).getZooKeeper(); + shareZK = true; + } + } + if (zkw == null) { + zkw = new ZKWatcher(getConf(), "replicationLogCleaner", null); + } + this.queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); } catch (IOException e) { LOG.error("Error while configuring " + this.getClass().getName(), e); } @@ -126,7 +138,7 @@ public void setConf(Configuration conf, ZKWatcher zk, public void stop(String why) { if (this.stopped) return; this.stopped = true; - if (this.zkw != null) { + if (!shareZK && this.zkw != null) { LOG.info("Stopping " + this.zkw); this.zkw.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 8ed31d009fb7..064f9a657623 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -205,7 +205,7 @@ public void testLogCleaning() throws Exception { // 10 procedure WALs assertEquals(10, fs.listStatus(OLD_PROCEDURE_WALS_DIR).length); - LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, OLD_WALS_DIR, POOL); + LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, OLD_WALS_DIR, POOL, null); cleaner.chore(); // In oldWALs we end up with the current WAL, a newer WAL, the 3 old WALs which @@ -226,7 +226,7 @@ public void testLogCleaning() throws Exception { } @Test - public void testZooKeeperAbortDuringGetListOfReplicators() throws Exception { + public void testZooKeeperRecoveryDuringGetListOfReplicators() throws Exception { ReplicationLogCleaner cleaner = new ReplicationLogCleaner(); List dummyFiles = Arrays.asList( @@ -239,7 +239,7 @@ public void testZooKeeperAbortDuringGetListOfReplicators() throws Exception { final 
AtomicBoolean getListOfReplicatorsFailed = new AtomicBoolean(false); try { - faultyZK.init(); + faultyZK.init(false); ReplicationQueueStorage queueStorage = spy(ReplicationStorageFactory .getReplicationQueueStorage(faultyZK, conf)); doAnswer(new Answer() { @@ -263,6 +263,18 @@ public Object answer(InvocationOnMock invocation) throws Throwable { assertTrue(getListOfReplicatorsFailed.get()); assertFalse(toDelete.iterator().hasNext()); assertFalse(cleaner.isStopped()); + + //zk recovery. + faultyZK.init(true); + cleaner.preClean(); + Iterable filesToDelete = cleaner.getDeletableFiles(dummyFiles); + Iterator iter = filesToDelete.iterator(); + assertTrue(iter.hasNext()); + assertEquals(new Path("log1"), iter.next().getPath()); + assertTrue(iter.hasNext()); + assertEquals(new Path("log2"), iter.next().getPath()); + assertFalse(iter.hasNext()); + } finally { faultyZK.close(); } @@ -306,7 +318,7 @@ public void testOnConfigurationChange() throws Exception { Server server = new DummyServer(); FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); - LogCleaner cleaner = new LogCleaner(3000, server, conf, fs, OLD_WALS_DIR, POOL); + LogCleaner cleaner = new LogCleaner(3000, server, conf, fs, OLD_WALS_DIR, POOL, null); int size = cleaner.getSizeOfCleaners(); assertEquals(LogCleaner.DEFAULT_OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC, cleaner.getCleanerThreadTimeoutMsec()); @@ -426,10 +438,12 @@ public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable a super(conf, identifier, abortable); } - public void init() throws Exception { + public void init(boolean autoRecovery) throws Exception { this.zk = spy(super.getRecoverableZooKeeper()); - doThrow(new KeeperException.ConnectionLossException()) - .when(zk).getChildren("/hbase/replication/rs", null); + if (!autoRecovery) { + doThrow(new KeeperException.ConnectionLossException()) + .when(zk).getChildren("/hbase/replication/rs", null); + } } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java index 08b5f9951906..39497b07e52f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java @@ -72,7 +72,7 @@ public void stop(String why) { public boolean isStopped() { return stopped; } - }, conf, fs, globalWALArchiveDir, cleanerPool); + }, conf, fs, globalWALArchiveDir, cleanerPool, null); choreService.scheduleChore(logCleaner); } From a155a44ea38e47843a8127c88d349d711e05d2fc Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Tue, 15 Dec 2020 09:49:16 -0800 Subject: [PATCH 583/769] HBASE-25389 [Flakey Tests] branch-2 TestMetaShutdownHandler (#2773) Signed-off-by: Bharath Vissapragada --- .../apache/hadoop/hbase/master/TestMetaShutdownHandler.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java index 742734e0af23..d4c19335dd65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import 
org.apache.hadoop.hbase.master.assignment.RegionStates; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -102,6 +103,7 @@ public void testExpireMetaRegionServer() throws Exception { RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); assertEquals("Wrong state for meta!", RegionState.State.OPEN, metaState.getState()); assertNotEquals("Meta is on master!", metaServerName, master.getServerName()); + HRegionServer metaRegionServer = cluster.getRegionServer(metaServerName); // Delete the ephemeral node of the meta-carrying region server. // This is trigger the expire of this region server on the master. @@ -113,6 +115,7 @@ public void testExpireMetaRegionServer() throws Exception { // Wait for SSH to finish final ServerManager serverManager = master.getServerManager(); final ServerName priorMetaServerName = metaServerName; + TEST_UTIL.waitFor(60000, 100, () -> metaRegionServer.isStopped()); TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { From 4e4d58cb13e85c842c3e1084cb16a9a7ac4d7914 Mon Sep 17 00:00:00 2001 From: Pankaj Date: Tue, 15 Dec 2020 23:56:02 +0530 Subject: [PATCH 584/769] HBASE-25378 Legacy comparator in Hfile trailer will fail to load (#2756) Signed-off-by: stack Signed-off-by: Viraj Jasani --- .../apache/hadoop/hbase/io/hfile/FixedFileTrailer.java | 2 ++ .../hadoop/hbase/io/hfile/TestFixedFileTrailer.java | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 7ab4edb438a6..6a2dcf926a4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -612,6 +612,8 @@ private static Class getComparatorClass(String compara comparatorKlass = CellComparatorImpl.class; } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))) { comparatorKlass = MetaCellComparator.class; } else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index a5215bfe1d94..6382a0d74701 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -130,6 +130,11 @@ public void testCreateComparator() throws IOException { t.createComparator(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()).getClass()); assertEquals(MetaCellComparator.class, t.createComparator(KeyValue.META_COMPARATOR.getClass().getName()).getClass()); + assertEquals(MetaCellComparator.class, + 
t.createComparator("org.apache.hadoop.hbase.CellComparator$MetaCellComparator").getClass()); + assertEquals(MetaCellComparator.class, + t.createComparator("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator") + .getClass()); assertEquals(MetaCellComparator.class, t.createComparator( MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); assertEquals(MetaCellComparator.META_COMPARATOR.getClass(), t.createComparator( @@ -139,7 +144,8 @@ public void testCreateComparator() throws IOException { assertNull(t.createComparator(Bytes.BYTES_RAWCOMPARATOR.getClass().getName())); assertNull(t.createComparator("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")); } catch (IOException e) { - fail("Unexpected exception while testing FixedFileTrailer#createComparator()"); + fail("Unexpected exception while testing FixedFileTrailer#createComparator(), " + + e.getMessage()); } // Test an invalid comparatorClassName From b99006ee09901286b61a6d0f0880955d622cb7c9 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Wed, 16 Dec 2020 12:25:59 +0800 Subject: [PATCH 585/769] HBASE-25365 The log in move_servers_rsgroup is incorrect (#2742) Signed-off-by: stack --- .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 35 +++++++++---------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 038e4dfc0e06..9850917e795d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -954,27 +954,28 @@ private void addRegion(final LinkedList regions, RegionInfo hri) { * located there. * @param movedServers the servers that are moved to new group * @param srcGrpServers all servers in the source group, excluding the movedServers - * @param targetGroup the target group + * @param targetGroupName the target group + * @param sourceGroupName the source group * @throws IOException if moving the server and tables fail */ private void moveServerRegionsFromGroup(Set
<Address> movedServers, Set<Address> srcGrpServers,
- RSGroupInfo targetGroup) throws IOException { - moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroup, rs -> getRegions(rs), - info -> { + moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroupName, sourceGroupName, + rs -> getRegions(rs), info -> { try { String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP); - return groupName.equals(targetGroup.getName()); + return groupName.equals(targetGroupName); } catch (IOException e) { LOG.warn("Failed to test group for region {} and target group {}", info, - targetGroup.getName()); + targetGroupName); return false; } }); } private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, Set<Address> newRegionsOwners,
- RSGroupInfo targetGrp, Function<T, List<RegionInfo>> getRegionsInfo, + String targetGroupName, String sourceGroupName, Function<T, List<RegionInfo>> getRegionsInfo, Function<RegionInfo, Boolean> validation) throws IOException { // Get server names corresponding to given Addresses List<ServerName> movedServerNames = new ArrayList<>(regionsOwners.size()); @@ -1001,7 +1002,7 @@ private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, Set<Address>
new for (RegionInfo region : getRegionsInfo.apply((T) owner.getAddress())) { if (!validation.apply(region)) { LOG.info("Moving region {}, which do not belong to RSGroup {}", - region.getShortNameToLog(), targetGrp.getName()); + region.getShortNameToLog(), targetGroupName); // Move region back to source RSGroup servers ServerName dest = masterServices.getLoadBalancer().randomAssignment(region, srcGrpServerNames); @@ -1015,17 +1016,16 @@ private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, Set<Address>
new assignmentFutures.add(Pair.newPair(region, future)); } catch (IOException ioe) { failedRegions.add(region.getRegionNameAsString()); - LOG.debug("Move region {} from group failed, will retry, current retry time is {}", + LOG.debug("Move region {} failed, will retry, current retry time is {}", region.getShortNameToLog(), retry, ioe); toThrow = ioe; } } } } - waitForRegionMovement(assignmentFutures, failedRegions, targetGrp.getName(), retry); + waitForRegionMovement(assignmentFutures, failedRegions, sourceGroupName, retry); if (failedRegions.isEmpty()) { - LOG.info("All regions from server(s) {} moved to target group {}.", movedServerNames, - targetGrp.getName()); + LOG.info("All regions from {} are moved back to {}", movedServerNames, sourceGroupName); return; } else { try { @@ -1043,7 +1043,7 @@ private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, Set<Address>
new if (!failedRegions.isEmpty()) { // print failed moved regions, for later process conveniently String msg = String - .format("move regions for group %s failed, failed regions: %s", targetGrp.getName(), + .format("move regions for group %s failed, failed regions: %s", sourceGroupName, failedRegions); LOG.error(msg); throw new DoNotRetryIOException( @@ -1056,9 +1056,9 @@ private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, Set<Address>
new * completion even if some region movement fails. */ private void waitForRegionMovement(List<Pair<RegionInfo, Future<byte[]>>> regionMoveFutures, - Set<String> failedRegions, String tgtGrpName, int retryCount) { + Set<String> failedRegions, String sourceGroupName, int retryCount) { LOG.info("Moving {} region(s) to group {}, current retry={}", regionMoveFutures.size(), - tgtGrpName, retryCount); + sourceGroupName, retryCount); for (Pair<RegionInfo, Future<byte[]>> pair : regionMoveFutures) { try { pair.getSecond().get(); @@ -1073,7 +1073,7 @@ private void waitForRegionMovement(List<Pair<RegionInfo, Future<byte[]>>> region } catch (Exception e) { failedRegions.add(pair.getFirst().getRegionNameAsString()); LOG.error("Move region {} to group {} failed, will retry on next attempt", - pair.getFirst().getShortNameToLog(), tgtGrpName, e); + pair.getFirst().getShortNameToLog(), sourceGroupName, e); } } } @@ -1225,7 +1225,6 @@ public void moveServers(Set<Address>
servers, String targetGroupName) throws IOE if (StringUtils.isEmpty(targetGroupName)) { throw new ConstraintException("RSGroup cannot be null."); } - RSGroupInfo targetGroup = getRSGroupInfo(targetGroupName); // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. @@ -1270,7 +1269,7 @@ public void moveServers(Set<Address>
servers, String targetGroupName) throws IOE // MovedServers may be < passed in 'servers'. Set<Address>
    movedServers = moveServers(servers, srcGrp.getName(), targetGroupName); - moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroup); + moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroupName, srcGrp.getName()); LOG.info("Move servers done: {} => {}", srcGrp.getName(), targetGroupName); } } From 5e99b448abcf1dd4e0b8295ba007e61a7f207019 Mon Sep 17 00:00:00 2001 From: huaxiangsun Date: Tue, 15 Dec 2020 21:45:39 -0800 Subject: [PATCH 586/769] HBASE-25293 Followup jira to address the client handling issue when chaning from meta replica to non-meta-replica at the server side. (#2768) Signed-off-by: stack --- .../client/AsyncNonMetaRegionLocator.java | 2 +- .../CatalogReplicaLoadBalanceSelector.java | 2 + ...talogReplicaLoadBalanceSimpleSelector.java | 19 ++- ...talogReplicaLoadBalanceSimpleSelector.java | 132 ++++++++++++++++++ 4 files changed, 144 insertions(+), 11 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 2c2520f8bd12..1c686aca8b76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -211,7 +211,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = 1; + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index c3ce868757f1..27be88a9def2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -28,6 +28,8 @@ @InterfaceAudience.Private interface CatalogReplicaLoadBalanceSelector { + int UNINITIALIZED_NUM_OF_REPLICAS = -1; + /** * This method is called when input location is stale, i.e, when clients run into * org.apache.hadoop.hbase.NotServingRegionException. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index bc8264050149..01996b34e2ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -108,7 +108,6 @@ public String toString() { private final TableName tableName; private final IntSupplier getNumOfReplicas; private volatile boolean isStopped = false; - private final static int UNINITIALIZED_NUM_OF_REPLICAS = -1; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, IntSupplier getNumOfReplicas) { @@ -117,7 +116,7 @@ public String toString() { this.getNumOfReplicas = getNumOfReplicas; // This numOfReplicas is going to be lazy initialized. 
- this.numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS; + this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; // Start chores this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); @@ -146,7 +145,7 @@ public void onError(HRegionLocation loc) { */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; - if (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) { + if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } @@ -262,16 +261,16 @@ private void cleanupReplicaReplicaStaleCache() { private int refreshCatalogReplicaCount() { int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); LOG.debug("Refreshed replica count {}", newNumOfReplicas); - if (newNumOfReplicas == 1) { - LOG.warn("Table {}'s region replica count is 1, maybe a misconfiguration or failure to " - + "fetch the replica count", tableName); + // If the returned number of replicas is -1, it is caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. + if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + LOG.error("Failed to fetch Table {}'s region replica count", tableName); + return this.numOfReplicas; } - int cachedNumOfReplicas = this.numOfReplicas; - // If the returned number of replicas is 1, it is mostly caused by failure to fetch the - // replica count. Do not update the numOfReplicas in this case. + int cachedNumOfReplicas = this.numOfReplicas; if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - ((cachedNumOfReplicas != newNumOfReplicas) && (newNumOfReplicas != 1))) { + (cachedNumOfReplicas != newNumOfReplicas)) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java new file mode 100644 index 000000000000..6b14286f99ca --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; +import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ MediumTests.class, ClientTests.class }) +public class TestCatalogReplicaLoadBalanceSimpleSelector { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); + + private static final Logger LOG = LoggerFactory.getLogger( + TestCatalogReplicaLoadBalanceSimpleSelector.class); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final int NB_SERVERS = 4; + private static int numOfMetaReplica = NB_SERVERS - 1; + + private static AsyncConnectionImpl CONN; + + private static ConnectionRegistry registry; + private static Admin admin; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + + TEST_UTIL.startMiniCluster(NB_SERVERS); + admin = TEST_UTIL.getAdmin(); + admin.balancerSwitch(false, true); + + // Enable hbase:meta replication. + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( + TableName.META_TABLE_NAME).size() >= numOfMetaReplica); + + registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + CONN = new AsyncConnectionImpl(conf, registry, + registry.getClusterId().get(), null, User.getCurrent()); + } + + @AfterClass + public static void tearDown() throws Exception { + IOUtils.closeQuietly(CONN); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMetaChangeFromReplicaNoReplica() throws IOException, InterruptedException { + String replicaSelectorClass = CONN.getConfiguration(). 
+ get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, + CatalogReplicaLoadBalanceSimpleSelector.class.getName()); + + CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory + .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get + (CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + + assertNotEquals( + metaSelector.select(TableName.valueOf("test"), EMPTY_START_ROW, RegionLocateType.CURRENT), + RegionReplicaUtil.DEFAULT_REPLICA_ID); + + // Change to No meta replica + HBaseTestingUtility.setReplicas(admin, TableName.META_TABLE_NAME, 1); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( + TableName.META_TABLE_NAME).size() == 1); + + CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = + CatalogReplicaLoadBalanceSelectorFactory.createSelector( + replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get( + CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + assertEquals( + metaSelectorWithNoReplica.select(TableName.valueOf("test"), EMPTY_START_ROW, + RegionLocateType.CURRENT), RegionReplicaUtil.DEFAULT_REPLICA_ID); + } +} From 7857639b38f3c8051072681851d1b825cf575e60 Mon Sep 17 00:00:00 2001 From: huaxiangsun Date: Tue, 15 Dec 2020 21:52:54 -0800 Subject: [PATCH 587/769] HBASE-25368 Filter out more invalid encoded name in isEncodedRegionName(byte[] regionName) (#2753) Signed-off-by: stack --- .../hbase/client/RawAsyncHBaseAdmin.java | 87 ++++++++++--------- .../hadoop/hbase/client/RegionInfo.java | 18 +++- .../hadoop/hbase/client/TestAdmin1.java | 19 ++++ 3 files changed, 82 insertions(+), 42 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 512e7a96aa6d..7823963c4099 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2388,51 +2388,56 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR if (regionNameOrEncodedRegionName == null) { return failedFuture(new IllegalArgumentException("Passed region name can't be null")); } - try { - CompletableFuture> future; - if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { - String encodedName = Bytes.toString(regionNameOrEncodedRegionName); - if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { - // old format encodedName, should be meta region - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); - } else { - future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, - 
regionNameOrEncodedRegionName); - } + + CompletableFuture> future; + if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { + String encodedName = Bytes.toString(regionNameOrEncodedRegionName); + if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { + // old format encodedName, should be meta region + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); } else { - RegionInfo regionInfo = - CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName); - if (regionInfo.isMetaRegion()) { - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) - .findFirst()); - } else { - future = - ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); - } + future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, + regionNameOrEncodedRegionName); + } + } else { + // Not all regionNameOrEncodedRegionName here is going to be a valid region name, + // it needs to throw out IllegalArgumentException in case tableName is passed in. + RegionInfo regionInfo; + try { + regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName( + regionNameOrEncodedRegionName); + } catch (IOException ioe) { + throw new IllegalArgumentException(ioe.getMessage()); } - CompletableFuture returnedFuture = new CompletableFuture<>(); - addListener(future, (location, err) -> { - if (err != null) { - returnedFuture.completeExceptionally(err); - return; - } - if (!location.isPresent() || location.get().getRegion() == null) { - returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); - } else { - returnedFuture.complete(location.get()); - } - }); - return returnedFuture; - } catch (IOException e) { - return failedFuture(e); + if (regionInfo.isMetaRegion()) { + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) + .findFirst()); + } else { + future = + ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); + } } + + CompletableFuture returnedFuture = new CompletableFuture<>(); + addListener(future, (location, err) -> { + if (err != null) { + returnedFuture.completeExceptionally(err); + return; + } + if (!location.isPresent() || location.get().getRegion() == null) { + returnedFuture.completeExceptionally( + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); + } else { + returnedFuture.complete(location.get()); + } + }); + return returnedFuture; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index d7460e9d15ef..b6bdd0103de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -363,7 +363,23 @@ static byte[] getStartKey(final byte[] regionName) throws IOException { @InterfaceAudience.Private // For use by internals only. 
public static boolean isEncodedRegionName(byte[] regionName) { // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex. - return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH; + if (parseRegionNameOrReturnNull(regionName) == null) { + if (regionName.length > MD5_HEX_LENGTH) { + return false; + } else if (regionName.length == MD5_HEX_LENGTH) { + return true; + } else { + String encodedName = Bytes.toString(regionName); + try { + Integer.parseInt(encodedName); + // If this is a valid integer, it could be hbase:meta's encoded region name. + return true; + } catch(NumberFormatException er) { + return false; + } + } + } + return false; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index a0ed836f9c75..cfd61d295f89 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -99,6 +99,25 @@ public void testSplitFlushCompactUnknownTable() throws InterruptedException { assertTrue(exception instanceof TableNotFoundException); } + @Test + public void testCompactATableWithSuperLongTableName() throws Exception { + TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + try { + ADMIN.createTable(htd); + try { + ADMIN.majorCompactRegion(tableName.getName()); + ADMIN.majorCompactRegion(Bytes.toBytes("abcd")); + } catch (IllegalArgumentException iae) { + LOG.info("This is expected"); + } + } finally { + ADMIN.disableTable(tableName); + ADMIN.deleteTable(tableName); + } + } + @Test public void testCompactionTimestamps() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); From 685e780b38b9f4df5eb50220dc26bd4d0b6da14a Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Wed, 16 Dec 2020 18:46:21 +0530 Subject: [PATCH 588/769] HBASE-25246 Backup/Restore hbase cell tags Closes #2745 Signed-off-by: Anoop Sam John Signed-off-by: Viraj Jasani --- .../hbase/shaded/protobuf/ProtobufUtil.java | 70 ++++-- .../shaded/protobuf/TestProtobufUtil.java | 103 +++++++- .../apache/hadoop/hbase/mapreduce/Import.java | 5 +- .../hbase/mapreduce/ResultSerialization.java | 4 +- .../hbase/mapreduce/TestImportExport.java | 222 ++++++++++++++++++ .../hadoop/hbase/codec/MessageCodec.java | 2 +- src/main/asciidoc/_chapters/ops_mgt.adoc | 6 + 7 files changed, 391 insertions(+), 21 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index b9a08676f8ee..462ffb012d93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -1436,6 +1436,21 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final Result result) { + return toResult(result, false); + } + + /** + * Convert a client Result to a protocol buffer Result + * @param result the client Result to convert + * @param encodeTags whether to include tags in the converted protobuf result or not + * When @encodeTags is set to true, it
will return all the tags in the response. + * These tags may contain some sensitive data like acl permissions, etc. + * Only tools like Export and Import, which need to take backups, need to set + * it to true so that cell tags are persisted in the backup. + * Refer to HBASE-25246 for more context. + * @return the converted protocol buffer Result + */ + public static ClientProtos.Result toResult(final Result result, boolean encodeTags) { if (result.getExists() != null) { return toResult(result.getExists(), result.isStale()); } @@ -1447,7 +1462,7 @@ public static ClientProtos.Result toResult(final Result result) { ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); for (Cell c : cells) { - builder.addCell(toCell(c)); + builder.addCell(toCell(c, encodeTags)); } builder.setStale(result.isStale()); @@ -1494,6 +1509,22 @@ public static ClientProtos.Result toResultNoData(final Result result) { * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto) { + return toResult(proto, false); + } + + /** + * Convert a protocol buffer Result to a client Result + * + * @param proto the protocol buffer Result to convert + * @param decodeTags whether to decode tags into the converted client Result + * When @decodeTags is set to true, it will decode all the tags from the + * response. These tags may contain some sensitive data like acl permissions, + * etc. Only tools like Export and Import, which need to take backups, need to + * set it to true so that cell tags are persisted in the backup. + * Refer to HBASE-25246 for more context. + * @return the converted client Result + */ + public static Result toResult(final ClientProtos.Result proto, boolean decodeTags) { if (proto.hasExists()) { if (proto.getStale()) { return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; @@ -1509,7 +1540,7 @@ public static Result toResult(final ClientProtos.Result proto) { List cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); for (CellProtos.Cell c : values) { - cells.add(toCell(builder, c)); + cells.add(toCell(builder, c, decodeTags)); } return Result.create(cells, null, proto.getStale(), proto.getPartial()); } @@ -1552,7 +1583,7 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner if (cells == null) cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); for (CellProtos.Cell c: values) { - cells.add(toCell(builder, c)); + cells.add(toCell(builder, c, false)); } } @@ -2000,7 +2031,7 @@ public static void toIOException(ServiceException se) throws IOException { throw new IOException(se); } - public static CellProtos.Cell toCell(final Cell kv) { + public static CellProtos.Cell toCell(final Cell kv, boolean encodeTags) { // Doing this is going to kill us if we do it for all data passed. // St.Ack 20121205 CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder(); @@ -2015,7 +2046,10 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(wrap(((ByteBufferExtendedCell) kv).getValueByteBuffer(), ((ByteBufferExtendedCell) kv).getValuePosition(), kv.getValueLength())); - // TODO : Once tags become first class then we may have to set tags to kvbuilder.
+ if (encodeTags) { + kvbuilder.setTags(wrap(((ByteBufferExtendedCell) kv).getTagsByteBuffer(), + ((ByteBufferExtendedCell) kv).getTagsPosition(), kv.getTagsLength())); + } } else { kvbuilder.setRow( UnsafeByteOperations.unsafeWrap(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); @@ -2027,6 +2061,10 @@ public static CellProtos.Cell toCell(final Cell kv) { kvbuilder.setTimestamp(kv.getTimestamp()); kvbuilder.setValue(UnsafeByteOperations.unsafeWrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + if (encodeTags) { + kvbuilder.setTags(UnsafeByteOperations.unsafeWrap(kv.getTagsArray(), kv.getTagsOffset(), + kv.getTagsLength())); + } } return kvbuilder.build(); } @@ -2038,15 +2076,19 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { return UnsafeByteOperations.unsafeWrap(dup); } - public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - return cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()) - .build(); + public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, + boolean decodeTags) { + ExtendedCellBuilder builder = cellBuilder.clear() + .setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()) + .setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()) + .setType((byte) cell.getCellType().getNumber()) + .setValue(cell.getValue().toByteArray()); + if (decodeTags && cell.hasTags()) { + builder.setTags(cell.getTags().toByteArray()); + } + return builder.build(); } public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 7d6eda817cfa..c47150b04858 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -18,17 +18,24 @@ package org.apache.hadoop.hbase.shaded.protobuf; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -63,7 +70,8 @@ public class TestProtobufUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestProtobufUtil.class); - + private static final String TAG_STR = "tag-1"; + private static final byte TAG_TYPE = (byte)10; public 
TestProtobufUtil() { } @@ -271,9 +279,10 @@ public void testToCell() { ByteBuffer dbb = ByteBuffer.allocateDirect(arr.length); dbb.put(arr); ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength()); - CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV); + CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV, false); Cell newOffheapKV = - ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell); + ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, + false); assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0); } @@ -479,4 +488,92 @@ public void testRegionLockInfo() { + "\"sharedLockCount\":0" + "}]", lockJson); } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encode/decode tags is set to true. + */ + @Test + public void testCellConversionWithTags() { + + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, true); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(1, decodedTags.size()); + Tag decodedTag = decodedTags.get(0); + assertEquals(TAG_TYPE, decodedTag.getType()); + assertEquals(TAG_STR, Tag.getValueAsString(decodedTag)); + } + + private Cell getCellWithTags() { + Tag tag = new ArrayBackedTag(TAG_TYPE, TAG_STR); + ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + cellBuilder.setRow(Bytes.toBytes("row1")); + cellBuilder.setFamily(Bytes.toBytes("f1")); + cellBuilder.setQualifier(Bytes.toBytes("q1")); + cellBuilder.setValue(Bytes.toBytes("value1")); + cellBuilder.setType(Cell.Type.Delete); + cellBuilder.setTags(Collections.singletonList(tag)); + return cellBuilder.build(); + } + + private Cell getCellFromProtoResult(CellProtos.Cell protoCell, boolean decodeTags) { + ExtendedCellBuilder decodedBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + return ProtobufUtil.toCell(decodedBuilder, protoCell, decodeTags); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encode/decode tags is set to false. + */ + @Test + public void testCellConversionWithoutTags() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, false); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encoding of tags is set to false + * and decoding of tags is set to true. 
+ */ + @Test + public void testTagEncodeFalseDecodeTrue() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, false); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, true); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } + + /** + * Test {@link ProtobufUtil#toCell(Cell, boolean)} and + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion + * methods when it contains tags and encoding of tags is set to true + * and decoding of tags is set to false. + */ + @Test + public void testTagEncodeTrueDecodeFalse() { + Cell cell = getCellWithTags(); + CellProtos.Cell protoCell = ProtobufUtil.toCell(cell, true); + assertNotNull(protoCell); + + Cell decodedCell = getCellFromProtoResult(protoCell, false); + List decodedTags = PrivateCellUtil.getTags(decodedCell); + assertEquals(0, decodedTags.size()); + } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 239a12bdc688..30071fdfd809 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -511,6 +512,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { + List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -524,7 +526,8 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength()); // value length + kv.getValueLength(), // value length + tags.size() == 0 ? 
null: tags); } } return kv; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java index dac1d425d806..9fdaa7b78f75 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java @@ -128,7 +128,7 @@ public void close() throws IOException { @Override public Result deserialize(Result mutation) throws IOException { ClientProtos.Result proto = ClientProtos.Result.parseDelimitedFrom(in); - return ProtobufUtil.toResult(proto); + return ProtobufUtil.toResult(proto, true); } @Override @@ -152,7 +152,7 @@ public void open(OutputStream out) throws IOException { @Override public void serialize(Result result) throws IOException { - ProtobufUtil.toResult(result).writeDelimitedTo(out); + ProtobufUtil.toResult(result, true).writeDelimitedTo(out); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 12060a742a2b..7b38c59c9387 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.apache.hadoop.hbase.HConstants.RPC_CODEC_CONF_KEY; +import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -34,10 +36,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -46,10 +51,14 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -58,11 +67,18 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import 
org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.Import.CellImporter; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -117,6 +133,9 @@ public class TestImportExport { private static final long now = System.currentTimeMillis(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final String TEST_ATTR = "source_op"; + public static final String TEST_TAG = "test_tag"; @BeforeClass public static void beforeClass() throws Throwable { @@ -801,4 +820,207 @@ public boolean isWALVisited() { return isVisited; } } + + /** + * Add cell tags to delete mutations, run export and import tool and + * verify that tags are present in import table also. + * @throws Throwable throws Throwable. + */ + @Test + public void testTagsAddition() throws Throwable { + final TableName exportTable = TableName.valueOf(name.getMethodName()); + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(desc); + + Table exportT = UTIL.getConnection().getTable(exportTable); + + //Add first version of QUAL + Put p = new Put(ROW1); + p.addColumn(FAMILYA, QUAL, now, QUAL); + exportT.put(p); + + //Add Delete family marker + Delete d = new Delete(ROW1, now+3); + // Add test attribute to delete mutation. + d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); + exportT.delete(d); + + // Run export tool with KeyValueCodecWithTags as Codec. This will ensure that export tool + // will use KeyValueCodecWithTags. + String[] args = new String[] { + "-D" + ExportUtils.RAW_SCAN + "=true", + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + exportTable.getNameAsString(), + FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); + // Assert tag exists in exportTable + checkWhetherTagExists(exportTable, true); + + // Create an import table with MetadataController. + final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); + TableDescriptor importTableDesc = TableDescriptorBuilder + .newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(importTableDesc); + + // Run import tool. + args = new String[] { + // This will make sure that codec will encode and decode tags in rpc call. 
+ "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + importTable.getNameAsString(), + FQ_OUTPUT_DIR + }; + assertTrue(runImport(args)); + // Make sure that tags exists in imported table. + checkWhetherTagExists(importTable, true); + } + + private void checkWhetherTagExists(TableName table, boolean tagExists) throws IOException { + List values = new ArrayList<>(); + for (HRegion region : UTIL.getHBaseCluster().getRegions(table)) { + Scan scan = new Scan(); + // Make sure to set rawScan to true so that we will get Delete Markers. + scan.setRaw(true); + scan.readAllVersions(); + scan.withStartRow(ROW1); + // Need to use RegionScanner instead of table#getScanner since the latter will + // not return tags since it will go through rpc layer and remove tags intentionally. + RegionScanner scanner = region.getScanner(scan); + scanner.next(values); + if (!values.isEmpty()) { + break; + } + } + boolean deleteFound = false; + for (Cell cell: values) { + if (PrivateCellUtil.isDelete(cell.getType().getCode())) { + deleteFound = true; + List tags = PrivateCellUtil.getTags(cell); + // If tagExists flag is true then validate whether tag contents are as expected. + if (tagExists) { + Assert.assertEquals(1, tags.size()); + for (Tag tag : tags) { + Assert.assertEquals(TEST_TAG, Tag.getValueAsString(tag)); + } + } else { + // If tagExists flag is disabled then check for 0 size tags. + assertEquals(0, tags.size()); + } + } + } + Assert.assertTrue(deleteFound); + } + + /* + This co-proc will add a cell tag to delete mutation. + */ + public static class MetadataController implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) + throws IOException { + if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { + return; + } + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (!(m instanceof Delete)) { + continue; + } + byte[] sourceOpAttr = m.getAttribute(TEST_ATTR); + if (sourceOpAttr == null) { + continue; + } + Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); + List updatedCells = new ArrayList<>(); + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + Cell cell = cellScanner.current(); + List tags = PrivateCellUtil.getTags(cell); + tags.add(sourceOpTag); + Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + updatedCells.add(updatedCell); + } + m.getFamilyCellMap().clear(); + // Clear and add new Cells to the Mutation. + for (Cell cell : updatedCells) { + Delete d = (Delete) m; + d.add(cell); + } + } + } + } + + /** + * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string + * This means it will use no Codec. Make sure that we don't return Tags in response. 
+ * @throws Exception on failure + */ + @Test + public void testTagsWithEmptyCodec() throws Exception { + TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor tableDesc = TableDescriptorBuilder + .newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE) + .build()) + .setCoprocessor(MetadataController.class.getName()) + .build(); + UTIL.getAdmin().createTable(tableDesc); + Configuration conf = new Configuration(UTIL.getConfiguration()); + conf.set(RPC_CODEC_CONF_KEY, ""); + conf.set(DEFAULT_CODEC_CLASS, ""); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { + //Add first version of QUAL + Put p = new Put(ROW1); + p.addColumn(FAMILYA, QUAL, now, QUAL); + table.put(p); + + //Add Delete family marker + Delete d = new Delete(ROW1, now+3); + // Add test attribute to delete mutation. + d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); + table.delete(d); + + // Since RPC_CODEC_CONF_KEY and DEFAULT_CODEC_CLASS are set to empty, it will use + // an empty Codec and shouldn't encode/decode tags. + Scan scan = new Scan().withStartRow(ROW1).setRaw(true); + ResultScanner scanner = table.getScanner(scan); + int count = 0; + Result result; + while ((result = scanner.next()) != null) { + List cells = result.listCells(); + assertEquals(2, cells.size()); + Cell cell = cells.get(0); + assertTrue(CellUtil.isDelete(cell)); + List tags = PrivateCellUtil.getTags(cell); + assertEquals(0, tags.size()); + count++; + } + assertEquals(1, count); + } finally { + UTIL.deleteTable(tableName); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java index 4b266e2bda7a..ddbbb5fc8bdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java @@ -74,7 +74,7 @@ static class MessageDecoder extends BaseDecoder { @Override protected Cell parseCell() throws IOException { - return ProtobufUtil.toCell(cellBuilder, CellProtos.Cell.parseDelimitedFrom(this.in)); + return ProtobufUtil.toCell(cellBuilder, CellProtos.Cell.parseDelimitedFrom(this.in), false); } } diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index e491cbc95b54..2c5a3d413c47 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -735,6 +735,9 @@ specifying column families and applying filters during the export. By default, the `Export` tool only exports the newest version of a given cell, regardless of the number of versions stored. To export more than one version, replace *__* with the desired number of versions. +For MapReduce-based Export, if you want to export cell tags, set the following config property +`hbase.client.rpc.codec` to `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags`. + Note: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.
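To make the codec setting concrete, an illustrative invocation might look like the following (a sketch only, not part of the patch: the `Export` class and the `-D` property are taken from the diff above, while `<tablename>` and `<outputdir>` are placeholders):

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.Export \
  -Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags \
  <tablename> <outputdir>
----

A matching Import run needs the same property set, as exercised by the testTagsAddition case in the TestImportExport diff above.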
[[import]] @@ -755,6 +758,9 @@ To import 0.94 exported files in a 0.96 cluster or onwards, you need to set syst $ bin/hbase -Dhbase.import.version=0.94 org.apache.hadoop.hbase.mapreduce.Import ---- +If you want to import cell tags then set the following config property +`hbase.client.rpc.codec` to `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags` + [[importtsv]] === ImportTsv From e967e7bd016a85fd19e25794f2bab45f82f7b8e8 Mon Sep 17 00:00:00 2001 From: stack Date: Tue, 15 Dec 2020 21:02:45 -0800 Subject: [PATCH 589/769] HBASE-25400 [Flakey Tests] branch-2 TestRegionMoveAndAbandon --- .../hbase/master/assignment/TestRegionMoveAndAbandon.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java index 45e9d01972d6..cc8335aa25af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java @@ -103,8 +103,9 @@ public void test() throws Exception { LOG.info("Killing RS {}", rs1.getServerName()); // Stop RS1 cluster.killRegionServer(rs1.getServerName()); + UTIL.waitFor(30_000, () -> rs1.isStopped() && !rs1.isAlive()); // Region should get moved to RS2 - UTIL.waitTableAvailable(tableName, 30_000); + UTIL.waitTableAvailable(tableName, 60_000); // Restart the master LOG.info("Killing master {}", cluster.getMaster().getServerName()); cluster.killMaster(cluster.getMaster().getServerName()); From cfaed9d7828c7c2371d1da635cb3e7edcfbfecc9 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 16 Dec 2020 11:31:54 -0800 Subject: [PATCH 590/769] Add entry for 2.4.0 to downloads.xml --- src/site/xdoc/downloads.xml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index bbd60e5e2e4a..fe6f3d8d198e 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -43,6 +43,29 @@ under the License. Download Notices + + + 2.4.0 + + + 2020/12/15 + + + 2.4.0 vs 2.3.0 + + + Changes + + + Release Notes + + + src (sha512 asc)
    + bin (sha512 asc)
+ client-bin (sha512 asc) + + + 2.3.3 From ef89c89c5a50a5f05a1137ff57652fb7adb5eec3 Mon Sep 17 00:00:00 2001 From: Sandeep Pal Date: Fri, 18 Dec 2020 13:23:00 +0530 Subject: [PATCH 591/769] HBASE-25383: Ability to update and remove peer base config Closes #2778 Signed-off-by: Bharath Vissapragada Signed-off-by: Geoffrey Jacoby Signed-off-by: Viraj Jasani --- .../ReplicationPeerConfigUtil.java | 34 +++--- .../replication/ReplicationPeerConfig.java | 6 + .../ReplicationPeerConfigBuilder.java | 9 ++ .../TestZKReplicationPeerStorage.java | 67 +++++++++-- .../replication/ReplicationPeerManager.java | 4 +- .../replication/TestMasterReplication.java | 107 +++++++++++++----- 6 files changed, 174 insertions(+), 53 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index c5dcd762e96f..05343eae4ccd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -27,7 +27,6 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompoundConfiguration; @@ -40,12 +39,12 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - +import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -246,7 +245,7 @@ public static Map> convert2Map(ReplicationProtos.TableCF /** * @param bytes Content of a peer znode. * @return ClusterKey parsed from the passed bytes. - * @throws DeserializationException + * @throws DeserializationException if deserialization fails */ public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) throws DeserializationException { @@ -390,7 +389,7 @@ public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig pe } /** - * @param peerConfig + * @param peerConfig peer config of replication peer * @return Serialized protobuf of peerConfig with pb magic prefix prepended suitable * for use as content of a this.peersZNode; i.e. the content of PEER_ID znode under * /hbase/replication/peers/PEER_ID @@ -454,37 +453,42 @@ public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig( } /** - * Helper method to add base peer configs from Configuration to ReplicationPeerConfig - * if not present in latter. + * Helper method to add/remove base peer configs from Configuration to ReplicationPeerConfig * * This merges the user supplied peer configuration * {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs * provided as property hbase.replication.peer.base.configs in hbase configuration.
- * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1". Original value - * of conf is retained if already present in ReplicationPeerConfig. + * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". + * If value is empty, it will remove the existing key-value from peer config. * * @param conf Configuration * @return ReplicationPeerConfig containing updated configs. */ - public static ReplicationPeerConfig addBasePeerConfigsIfNotPresent(Configuration conf, + public static ReplicationPeerConfig updateReplicationBasePeerConfigs(Configuration conf, ReplicationPeerConfig receivedPeerConfig) { - String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); ReplicationPeerConfigBuilder copiedPeerConfigBuilder = ReplicationPeerConfig. newBuilder(receivedPeerConfig); - Map receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); + Map receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); + String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); if (basePeerConfigs.length() != 0) { Map basePeerConfigMap = Splitter.on(';').trimResults().omitEmptyStrings() .withKeyValueSeparator("=").split(basePeerConfigs); - for (Map.Entry entry : basePeerConfigMap.entrySet()) { + for (Map.Entry entry : basePeerConfigMap.entrySet()) { String configName = entry.getKey(); String configValue = entry.getValue(); - // Only override if base config does not exist in existing peer configs - if (!receivedPeerConfigMap.containsKey(configName)) { + // If the config is provided with empty value, for eg. k1="", + // we remove it from peer config. Providing config with empty value + // is required so that it doesn't remove any other config unknowingly. + if (Strings.isNullOrEmpty(configValue)) { + copiedPeerConfigBuilder.removeConfiguration(configName); + } else if (!receivedPeerConfigMap.getOrDefault(configName, "").equals(configValue)) { + // update the configuration if exact config and value doesn't exists copiedPeerConfigBuilder.putConfiguration(configName, configValue); } } } + return copiedPeerConfigBuilder.build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index aba703ccdee8..bb3ff042ca06 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -294,6 +294,12 @@ public ReplicationPeerConfigBuilder putConfiguration(String key, String value) { return this; } + @Override + public ReplicationPeerConfigBuilder removeConfiguration(String key) { + this.configuration.remove(key); + return this; + } + @Override public ReplicationPeerConfigBuilder putPeerData(byte[] key, byte[] value) { this.peerData.put(key, value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java index 58ff220e5631..c6a97fad9e81 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java @@ -52,6 +52,15 @@ public interface ReplicationPeerConfigBuilder { @InterfaceAudience.Private ReplicationPeerConfigBuilder putConfiguration(String key, String value); + /** + * Removes a "raw" 
configuration property for this replication peer. For experts only. + * @param key Configuration property key to be removed + * @return {@code this} + */ + @InterfaceAudience.Private + ReplicationPeerConfigBuilder removeConfiguration(String key); + + /** * Adds all of the provided "raw" configuration entries to {@code this}. * @param configuration A collection of raw configuration entries diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java index e7ee1e7c4835..18b0c121e67e 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java @@ -26,7 +26,6 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - import java.io.IOException; import java.util.HashMap; import java.util.Iterator; @@ -35,7 +34,6 @@ import java.util.Random; import java.util.Set; import java.util.stream.Stream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseZKTestingUtility; @@ -45,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.zookeeper.KeeperException; +import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -73,6 +72,11 @@ public static void tearDown() throws IOException { UTIL.shutdownMiniZKCluster(); } + @After + public void cleanCustomConfigurations() { + UTIL.getConfiguration().unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + } + private Set randNamespaces(Random rand) { return Stream.generate(() -> Long.toHexString(rand.nextLong())).limit(rand.nextInt(5)) .collect(toSet()); @@ -220,8 +224,7 @@ public void testNoSyncReplicationState() STORAGE.getNewSyncReplicationStateNode(peerId))); } - @Test - public void testBaseReplicationPeerConfig() { + @Test public void testBaseReplicationPeerConfig() throws ReplicationException{ String customPeerConfigKey = "hbase.xxx.custom_config"; String customPeerConfigValue = "test"; String customPeerConfigUpdatedValue = "testUpdated"; @@ -241,7 +244,7 @@ public void testBaseReplicationPeerConfig() { concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - addBasePeerConfigsIfNotPresent(conf,existingReplicationPeerConfig); + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). get(customPeerConfigKey)); assertEquals(customPeerConfigSecondValue, updatedReplicationPeerConfig.getConfiguration(). get(customPeerConfigSecondKey)); - // validates base configs does not override value if config already present + // validates base configs get updated values even if config already present + conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";").
concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil. - addBasePeerConfigsIfNotPresent(conf,updatedReplicationPeerConfig); + updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); - assertEquals(customPeerConfigValue, replicationPeerConfigAfterValueUpdate. + assertEquals(customPeerConfigUpdatedValue, replicationPeerConfigAfterValueUpdate. getConfiguration().get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondValue, replicationPeerConfigAfterValueUpdate. + assertEquals(customPeerConfigSecondUpdatedValue, replicationPeerConfigAfterValueUpdate. getConfiguration().get(customPeerConfigSecondKey)); } + + @Test public void testBaseReplicationRemovePeerConfig() throws ReplicationException { + String customPeerConfigKey = "hbase.xxx.custom_config"; + String customPeerConfigValue = "test"; + ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); + + // custom config not present + assertEquals(existingReplicationPeerConfig.getConfiguration().get(customPeerConfigKey), null); + + Configuration conf = UTIL.getConfiguration(); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat(customPeerConfigValue)); + + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + + // validates base configs are present in replicationPeerConfig + assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). + get(customPeerConfigKey)); + + conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat("")); + + ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil. + updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + + assertNull(replicationPeerConfigRemoved.getConfiguration().get(customPeerConfigKey)); + } + + @Test public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() + throws ReplicationException { + String customPeerConfigKey = "hbase.xxx.custom_config"; + ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); + + // custom config not present + assertEquals(existingReplicationPeerConfig.getConfiguration().get(customPeerConfigKey), null); + Configuration conf = UTIL.getConfiguration(); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat("")); + + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. 
+ updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + assertNull(updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 2c930e103fc8..add51210a38f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -233,7 +233,7 @@ public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean ena // this should be a retry, just return return; } - peerConfig = ReplicationPeerConfigUtil.addBasePeerConfigsIfNotPresent(conf, peerConfig); + peerConfig = ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, peerConfig); ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build(); SyncReplicationState syncReplicationState = copiedPeerConfig.isSyncReplication() ? SyncReplicationState.DOWNGRADE_ACTIVE @@ -547,7 +547,7 @@ public static ReplicationPeerManager create(ZKWatcher zk, Configuration conf, St for (String peerId : peerStorage.listPeerIds()) { ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId); - peerConfig = ReplicationPeerConfigUtil.addBasePeerConfigsIfNotPresent(conf, peerConfig); + peerConfig = ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, peerConfig); peerStorage.updatePeerConfig(peerId, peerConfig); boolean enabled = peerStorage.isPeerEnabled(peerId); SyncReplicationState state = peerStorage.getPeerSyncReplicationState(peerId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index b7e5edd649b2..b2e0e6d4860e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; - import java.io.Closeable; import java.io.IOException; import java.util.Arrays; @@ -29,7 +28,6 @@ import java.util.Optional; import java.util.Random; import java.util.concurrent.CountDownLatch; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -200,8 +198,8 @@ public void testHFileCyclicReplication() throws Exception { // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1'. byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; int numOfRows = 100; int[] expectedCounts = new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; @@ -212,10 +210,10 @@ public void testHFileCyclicReplication() throws Exception { // Load 200 rows for each hfile range in cluster '1' and validate whether its been replicated // to cluster '0'. 
hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, - new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; numOfRows = 200; int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], - hfileRanges.length * numOfRows + expectedCounts[1] }; + hfileRanges.length * numOfRows + expectedCounts[1] }; loadAndValidateHFileReplication("testHFileCyclicReplication_10", 1, new int[] { 0 }, row, famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); @@ -314,12 +312,12 @@ public void testHFileMultiSlaveReplication() throws Exception { // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1'. byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, - new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, + new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; int numOfRows = 100; int[] expectedCounts = - new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; + new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; loadAndValidateHFileReplication("testHFileCyclicReplication_0", 0, new int[] { 1 }, row, famName, htables, hfileRanges, numOfRows, expectedCounts, true); @@ -335,11 +333,11 @@ public void testHFileMultiSlaveReplication() throws Exception { // Load 200 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1' and '2'. Previous data should be replicated to cluster '2'. hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("ssss"), Bytes.toBytes("uuuu") }, - new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; + new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; numOfRows = 200; int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], - hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; + hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; loadAndValidateHFileReplication("testHFileCyclicReplication_1", 0, new int[] { 1, 2 }, row, famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); @@ -370,8 +368,8 @@ public void testHFileReplicationForConfiguredTableCfs() throws Exception { // Load 100 rows for each hfile range in cluster '0' for table CF 'f' byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; int numOfRows = 100; int[] expectedCounts = new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; @@ -381,11 +379,11 @@ public void testHFileReplicationForConfiguredTableCfs() throws Exception { // Load 100 rows for each hfile range in cluster '0' for table CF 'f1' hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, - new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; numOfRows = 100; int[] newExpectedCounts = - new int[] { hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; + new int[] { 
hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; loadAndValidateHFileReplication("load_f1", 0, new int[] { 1 }, row, famName1, htables, hfileRanges, numOfRows, newExpectedCounts, false); @@ -449,7 +447,7 @@ public void testCyclicReplication3() throws Exception { * */ @Test - public void testBasePeerConfigsForPeerMutations() + public void testBasePeerConfigsForReplicationPeer() throws Exception { LOG.info("testBasePeerConfigsForPeerMutations"); String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; @@ -502,18 +500,15 @@ public void testBasePeerConfigsForPeerMutations() utilities[0].restartHBaseCluster(1); admin = utilities[0].getAdmin(); - // Both retains the value of base configuration 1 value as before restart. - // Peer 1 (Update value), Peer 2 (Base Value) - Assert.assertEquals(firstCustomPeerConfigUpdatedValue, admin.getReplicationPeerConfig("1"). + // Configurations should be updated after restart again + Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). getConfiguration().get(firstCustomPeerConfigKey)); Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). getConfiguration().get(firstCustomPeerConfigKey)); - // Peer 1 gets new base config as part of restart. Assert.assertEquals(secondCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). getConfiguration().get(secondCustomPeerConfigKey)); - // Peer 2 retains the updated value as before restart. - Assert.assertEquals(secondCustomPeerConfigUpdatedValue, admin.getReplicationPeerConfig("2"). + Assert.assertEquals(secondCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). getConfiguration().get(secondCustomPeerConfigKey)); } finally { shutDownMiniClusters(); @@ -521,6 +516,64 @@ public void testBasePeerConfigsForPeerMutations() } } + @Test + public void testBasePeerConfigsRemovalForReplicationPeer() + throws Exception { + LOG.info("testBasePeerConfigsForPeerMutations"); + String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; + String firstCustomPeerConfigValue = "test"; + + try { + baseConfiguration.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + firstCustomPeerConfigKey.concat("=").concat(firstCustomPeerConfigValue)); + startMiniClusters(2); + addPeer("1", 0, 1); + Admin admin = utilities[0].getAdmin(); + + // Validates base configs 1 is present for both peer. + Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). + getConfiguration().get(firstCustomPeerConfigKey)); + + utilities[0].getConfiguration().unset(ReplicationPeerConfigUtil. + HBASE_REPLICATION_PEER_BASE_CONFIG); + utilities[0].getConfiguration().set(ReplicationPeerConfigUtil. 
+        HBASE_REPLICATION_PEER_BASE_CONFIG, firstCustomPeerConfigKey.concat("=").concat(""));
+
+
+      utilities[0].shutdownMiniHBaseCluster();
+      utilities[0].restartHBaseCluster(1);
+      admin = utilities[0].getAdmin();
+
+      // Configurations should be removed after the restart.
+      Assert.assertNull(admin.getReplicationPeerConfig("1")
+        .getConfiguration().get(firstCustomPeerConfigKey));
+    } finally {
+      shutDownMiniClusters();
+      baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG);
+    }
+  }
+
+  @Test
+  public void testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer()
+    throws Exception {
+    LOG.info("testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer");
+    String firstCustomPeerConfigKey = "hbase.xxx.custom_config";
+
+    try {
+      baseConfiguration.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG,
+        firstCustomPeerConfigKey.concat("=").concat(""));
+      startMiniClusters(2);
+      addPeer("1", 0, 1);
+      Admin admin = utilities[0].getAdmin();
+
+      Assert.assertNull("Config should not be there", admin.getReplicationPeerConfig("1").
+        getConfiguration().get(firstCustomPeerConfigKey));
+    } finally {
+      shutDownMiniClusters();
+      baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG);
+    }
+  }
+
   @After
   public void tearDown() throws IOException {
     configurations = null;
@@ -743,11 +796,11 @@ private void rollWALAndWait(final HBaseTestingUtility utility, final TableName t
     // listen for successful log rolls
     final WALActionsListener listener = new WALActionsListener() {
-          @Override
-          public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
-            latch.countDown();
-          }
-        };
+      @Override
+      public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
+        latch.countDown();
+      }
+    };
     region.getWAL().registerWALActionsListener(listener);

     // request a roll

From 872164f5acdb2fbf09258045f8f3ff0ee7862e6f Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Sat, 19 Dec 2020 20:28:30 +0800
Subject: [PATCH 592/769] Revert "HBASE-25368 Filter out more invalid encoded
 name in isEncodedRegionName(byte[] regionName) (#2753)"

This reverts commit c3276801256aa16a62e5cdba7a37d4e18d59e880.
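
For orientation, the check this revert restores in RegionInfo.isEncodedRegionName is purely
structural: a byte[] counts as an encoded region name when it does not parse as a full region
name and is no longer than an MD5 hex digest. A self-contained sketch of that rule
(illustrative only; parsesAsFullRegionName below is a simplified stand-in for
RegionInfo.parseRegionNameOrReturnNull, and the sample names are invented):

import java.nio.charset.StandardCharsets;

public class EncodedRegionNameSketch {
  // Matches RegionInfo.MD5_HEX_LENGTH: an encoded region name is the
  // 32-character MD5 hex suffix of the full region name.
  static final int MD5_HEX_LENGTH = 32;

  // Stand-in parser: a full region name has the shape
  // "<table>,<startKey>,<timestamp>.<md5hex>." and so contains a comma and
  // ends with a dot, while a bare encoded name has neither.
  static boolean parsesAsFullRegionName(byte[] name) {
    String s = new String(name, StandardCharsets.UTF_8);
    return s.indexOf(',') > 0 && s.endsWith(".");
  }

  // The restored heuristic: anything unparseable that still fits within an
  // MD5 hex digest is presumed to be an encoded name.
  static boolean isEncodedRegionName(byte[] name) {
    return !parsesAsFullRegionName(name) && name.length <= MD5_HEX_LENGTH;
  }

  public static void main(String[] args) {
    byte[] full =
      "t1,row-42,1608441000000.5c47f2b67a1f7b0d9f9cf87a4a4f36e1.".getBytes(StandardCharsets.UTF_8);
    byte[] encoded = "5c47f2b67a1f7b0d9f9cf87a4a4f36e1".getBytes(StandardCharsets.UTF_8);
    System.out.println(isEncodedRegionName(full));    // false: parses as a full name
    System.out.println(isEncodedRegionName(encoded)); // true: unparseable and <= 32 bytes
  }
}

The revert drops the extra Integer.parseInt branch that HBASE-25368 had added for old-format
hbase:meta encoded names, returning to the simpler rule above.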
--- .../hbase/client/RawAsyncHBaseAdmin.java | 87 +++++++++---------- .../hadoop/hbase/client/RegionInfo.java | 18 +--- .../hadoop/hbase/client/TestAdmin1.java | 19 ---- 3 files changed, 42 insertions(+), 82 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 7823963c4099..512e7a96aa6d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2388,56 +2388,51 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR if (regionNameOrEncodedRegionName == null) { return failedFuture(new IllegalArgumentException("Passed region name can't be null")); } - - CompletableFuture> future; - if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { - String encodedName = Bytes.toString(regionNameOrEncodedRegionName); - if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { - // old format encodedName, should be meta region - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); + try { + CompletableFuture> future; + if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) { + String encodedName = Bytes.toString(regionNameOrEncodedRegionName); + if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { + // old format encodedName, should be meta region + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); + } else { + future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, + regionNameOrEncodedRegionName); + } } else { - future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, - regionNameOrEncodedRegionName); - } - } else { - // Not all regionNameOrEncodedRegionName here is going to be a valid region name, - // it needs to throw out IllegalArgumentException in case tableName is passed in. 
- RegionInfo regionInfo; - try { - regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName( - regionNameOrEncodedRegionName); - } catch (IOException ioe) { - throw new IllegalArgumentException(ioe.getMessage()); + RegionInfo regionInfo = + CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName); + if (regionInfo.isMetaRegion()) { + future = connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) + .findFirst()); + } else { + future = + ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); + } } - if (regionInfo.isMetaRegion()) { - future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) - .findFirst()); - } else { - future = - ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); - } + CompletableFuture returnedFuture = new CompletableFuture<>(); + addListener(future, (location, err) -> { + if (err != null) { + returnedFuture.completeExceptionally(err); + return; + } + if (!location.isPresent() || location.get().getRegion() == null) { + returnedFuture.completeExceptionally( + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); + } else { + returnedFuture.complete(location.get()); + } + }); + return returnedFuture; + } catch (IOException e) { + return failedFuture(e); } - - CompletableFuture returnedFuture = new CompletableFuture<>(); - addListener(future, (location, err) -> { - if (err != null) { - returnedFuture.completeExceptionally(err); - return; - } - if (!location.isPresent() || location.get().getRegion() == null) { - returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); - } else { - returnedFuture.complete(location.get()); - } - }); - return returnedFuture; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index b6bdd0103de8..d7460e9d15ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -363,23 +363,7 @@ static byte[] getStartKey(final byte[] regionName) throws IOException { @InterfaceAudience.Private // For use by internals only. public static boolean isEncodedRegionName(byte[] regionName) { // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex. - if (parseRegionNameOrReturnNull(regionName) == null) { - if (regionName.length > MD5_HEX_LENGTH) { - return false; - } else if (regionName.length == MD5_HEX_LENGTH) { - return true; - } else { - String encodedName = Bytes.toString(regionName); - try { - Integer.parseInt(encodedName); - // If this is a valid integer, it could be hbase:meta's encoded region name. 
- return true; - } catch(NumberFormatException er) { - return false; - } - } - } - return false; + return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index cfd61d295f89..a0ed836f9c75 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -99,25 +99,6 @@ public void testSplitFlushCompactUnknownTable() throws InterruptedException { assertTrue(exception instanceof TableNotFoundException); } - @Test - public void testCompactATableWithSuperLongTableName() throws Exception { - TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); - try { - ADMIN.createTable(htd); - try { - ADMIN.majorCompactRegion(tableName.getName()); - ADMIN.majorCompactRegion(Bytes.toBytes("abcd")); - } catch (IllegalArgumentException iae) { - LOG.info("This is expected"); - } - } finally { - ADMIN.disableTable(tableName); - ADMIN.deleteTable(tableName); - } - } - @Test public void testCompactionTimestamps() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); From 2351af5b11dda683c600b09543e0213f5005f37e Mon Sep 17 00:00:00 2001 From: Akshay Sudheer <74921542+AkshayTSudheer@users.noreply.github.com> Date: Sun, 20 Dec 2020 02:07:21 +0530 Subject: [PATCH 593/769] HBASE-25404 Procedures table Id under master web UI gets word break to single character (#2783) Signed-off-by: Duo Zhang --- .../src/main/resources/hbase-webapps/master/procedures.jsp | 2 +- .../src/main/resources/hbase-webapps/static/css/hbase.css | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp index 8e1e23805abd..fba9a42b94e9 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp @@ -123,7 +123,7 @@

     <p>
       We do not list procedures that have completed successfully; their number makes it hard to spot the problematics.
     </p>
-    <table class="table table-striped" width="90%" >
+    <table id="tab_Procedures" class="table table-striped" width="90%" >
    diff --git a/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css b/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css index d1cc0fecf233..2661c8d62229 100644 --- a/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css +++ b/hbase-server/src/main/resources/hbase-webapps/static/css/hbase.css @@ -39,6 +39,11 @@ section { margin-bottom: 3em; } margin-top: 1.2em; } +table#tab_Procedures td:nth-child(-n+7) { + word-break: normal; + overflow-wrap: normal; +} + /* Region Server page styling */ /* striped tables styling */ From bc548efd5736eaf3d8b820a5d1a06892827f763d Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 20 Dec 2020 11:26:36 +0800 Subject: [PATCH 594/769] HBASE-25420 Some minor improvements in rpc implementation (#2792) Signed-off-by: XinSun Signed-off-by: stack --- .../hbase/ipc/NettyRpcDuplexHandler.java | 28 ++++++------ .../hbase/ipc/NettyRpcFrameDecoder.java | 8 ++-- .../apache/hadoop/hbase/ipc/ServerCall.java | 31 ++++++++----- .../hadoop/hbase/ipc/SimpleRpcServer.java | 45 +++++++++---------- 4 files changed, 58 insertions(+), 54 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index 649375a89c1c..2a2df8a7ad4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -17,35 +17,35 @@ */ package org.apache.hadoop.hbase.ipc; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.Message.Builder; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; - import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream; import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise; import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.hbase.CellScanner; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; -import org.apache.hadoop.io.compress.CompressionCodec; 
-import org.apache.hadoop.ipc.RemoteException; /** * The netty rpc handler. @@ -103,8 +103,8 @@ private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise p ctx.write(buf, withoutCellBlockPromise); ChannelPromise cellBlockPromise = ctx.newPromise(); ctx.write(cellBlock, cellBlockPromise); - PromiseCombiner combiner = new PromiseCombiner(); - combiner.addAll(withoutCellBlockPromise, cellBlockPromise); + PromiseCombiner combiner = new PromiseCombiner(ctx.executor()); + combiner.addAll((ChannelFuture) withoutCellBlockPromise, cellBlockPromise); combiner.finish(promise); } else { ctx.write(buf, promise); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java index 5ed3d2ef43f3..9444cd0dee99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.exceptions.RequestTooBigException; @@ -30,6 +29,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; import org.apache.hbase.thirdparty.io.netty.handler.codec.CorruptedFrameException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; @@ -124,10 +124,8 @@ private void handleTooBigRequest(ByteBuf in) throws IOException { RPCProtos.RequestHeader header = getHeader(in, headerSize); // Notify the client about the offending request - NettyServerCall reqTooBig = - new NettyServerCall(header.getCallId(), connection.service, null, null, null, null, - connection, 0, connection.addr, System.currentTimeMillis(), 0, - connection.rpcServer.bbAllocator, connection.rpcServer.cellBlockBuilder, null); + NettyServerCall reqTooBig = connection.createCall(header.getCallId(), connection.service, null, + null, null, null, 0, connection.addr, 0, null); connection.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index d20e28f8c786..a5c8a3920b17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -26,25 +26,27 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.io.ByteBuffAllocator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBufferListOutputStream; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.ByteBufferUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import 
org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.util.StringUtils; /** * Datastructure that holds all necessary to a method invocation and then afterward, carries @@ -217,10 +219,14 @@ public String toShortString() { } @Override - public synchronized void setResponse(Message m, final CellScanner cells, - Throwable t, String errorMsg) { - if (this.isError) return; - if (t != null) this.isError = true; + public synchronized void setResponse(Message m, final CellScanner cells, Throwable t, + String errorMsg) { + if (this.isError) { + return; + } + if (t != null) { + this.isError = true; + } BufferChain bc = null; try { ResponseHeader.Builder headerBuilder = ResponseHeader.newBuilder(); @@ -385,9 +391,10 @@ private static ByteBuffer createHeaderAndMessageBytes(Message result, Message he return pbBuf; } - protected BufferChain wrapWithSasl(BufferChain bc) - throws IOException { - if (!this.connection.useSasl) return bc; + protected BufferChain wrapWithSasl(BufferChain bc) throws IOException { + if (!this.connection.useSasl) { + return bc; + } // Looks like no way around this; saslserver wants a byte array. I have to make it one. // THIS IS A BIG UGLY COPY. 
byte [] responseBytes = bc.getBytes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index f3f78073dc5d..cbcbc9a8f7a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -40,24 +40,23 @@ import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.security.HBasePolicyProvider; -import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; +import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** * The RPC server with native java NIO implementation deriving from Hadoop to @@ -307,7 +306,7 @@ void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfM // If the connectionManager can't take it, close the connection. if (c == null) { if (channel.isOpen()) { - IOUtils.cleanup(null, channel); + IOUtils.cleanupWithLogger(LOG, channel); } continue; } @@ -416,10 +415,12 @@ protected void closeConnection(SimpleServerRpcConnection connection) { @Override public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } - /** Starts the service. Must be called before any calls will be handled. */ + /** Starts the service. Must be called before any calls will be handled. */ @Override public synchronized void start() { - if (started) return; + if (started) { + return; + } authTokenSecretMgr = createSecretManager(); if (authTokenSecretMgr != null) { setSecretManager(authTokenSecretMgr); @@ -433,7 +434,7 @@ public synchronized void start() { started = true; } - /** Stops the service. No new calls will be handled after this is called. */ + /** Stops the service. No new calls will be handled after this is called. */ @Override public synchronized void stop() { LOG.info("Stopping server on " + port); @@ -449,10 +450,9 @@ public synchronized void stop() { notifyAll(); } - /** Wait for the server to be stopped. - * Does not wait for all subthreads to finish. - * See {@link #stop()}. - * @throws InterruptedException e + /** + * Wait for the server to be stopped. Does not wait for all subthreads to finish. 
+ * @see #stop() */ @Override public synchronized void join() throws InterruptedException { @@ -503,13 +503,14 @@ public Pair call(BlockingService service, MethodDescriptor * @param channel writable byte channel to write to * @param bufferChain Chain of buffers to write * @return number of bytes written - * @throws java.io.IOException e * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer) */ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChain) - throws IOException { - long count = bufferChain.write(channel, NIO_BUFFER_LIMIT); - if (count > 0) this.metrics.sentBytes(count); + throws IOException { + long count = bufferChain.write(channel, NIO_BUFFER_LIMIT); + if (count > 0) { + this.metrics.sentBytes(count); + } return count; } @@ -523,22 +524,20 @@ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChai * @throws UnknownHostException if the address isn't a valid host name * @throws IOException other random errors from bind */ - public static void bind(ServerSocket socket, InetSocketAddress address, - int backlog) throws IOException { + public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) + throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = - new BindException("Problem binding to " + address + " : " + - e.getMessage()); + new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; } catch (SocketException e) { // If they try to bind to a different host's address, give a better // error message. if ("Unresolved address".equals(e.getMessage())) { - throw new UnknownHostException("Invalid hostname for server: " + - address.getHostName()); + throw new UnknownHostException("Invalid hostname for server: " + address.getHostName()); } throw e; } From 1a740fd12157d7512dba07796bae4ecdc2df9993 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sun, 20 Dec 2020 12:00:07 +0800 Subject: [PATCH 595/769] HBASE-25419 Remove deprecated methods in RpcServer implementation (#2791) Signed-off-by: XinSun Signed-off-by: stack --- .../hadoop/hbase/ipc/NettyRpcServer.java | 23 ------------------- .../hadoop/hbase/ipc/RpcServerInterface.java | 20 +--------------- .../hadoop/hbase/ipc/SimpleRpcServer.java | 23 ------------------- 3 files changed, 1 insertion(+), 65 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index f34cad5f60c3..1d3981f78846 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -23,22 +23,16 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.HBasePolicyProvider; import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig; -import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.io.netty.bootstrap.ServerBootstrap; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelInitializer; @@ -182,21 +176,4 @@ public int getNumOpenConnections() { // allChannels also contains the server channel, so exclude that from the count. return channelsCount > 0 ? channelsCount - 1 : channelsCount; } - - @Override - public Pair call(BlockingService service, - MethodDescriptor md, Message param, CellScanner cellScanner, - long receiveTime, MonitoredRPCHandler status) throws IOException { - return call(service, md, param, cellScanner, receiveTime, status, - System.currentTimeMillis(), 0); - } - - @Override - public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, - long startTime, int timeout) throws IOException { - NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null, - -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null); - return call(fakeCall, status); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java index e06daacf5fee..ee6e57a2a9f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -25,14 +25,12 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; @InterfaceAudience.Private @@ -46,22 +44,6 @@ public interface RpcServerInterface { void setSocketSendBufSize(int size); InetSocketAddress getListenerAddress(); - /** - * @deprecated As of release 1.3, this will be removed in HBase 3.0 - */ - @Deprecated - Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) - throws IOException; - - /** - * @deprecated As of release 2.0, this will be removed in HBase 3.0 - */ - @Deprecated - Pair call(BlockingService service, MethodDescriptor md, Message param, - CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, long startTime, - int timeout) throws IOException; - Pair call(RpcCall call, MonitoredRPCHandler status) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index cbcbc9a8f7a8..38c771277360 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -41,22 +41,16 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.security.HBasePolicyProvider; -import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; -import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** * The RPC server with native java NIO implementation deriving from Hadoop to @@ -475,23 +469,6 @@ public synchronized InetSocketAddress getListenerAddress() { return listener.getAddress(); } - @Override - public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) - throws IOException { - return call(service, md, param, cellScanner, receiveTime, status, System.currentTimeMillis(), - 0); - } - - @Override - public Pair call(BlockingService service, MethodDescriptor md, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, - long startTime, int timeout) throws IOException { - SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner, - null, -1, null, receiveTime, timeout, bbAllocator, cellBlockBuilder, null, null); - return call(fakeCall, status); - } - /** * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. * If the amount of data is large, it writes to channel in smaller chunks. 
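
A note on the HBASE-25420 change above: NettyRpcDuplexHandler now builds its PromiseCombiner
with the channel's executor, which replaces the deprecated no-arg constructor and asserts that
the combiner is only touched from the event loop; the cast to ChannelFuture steers the call to
the non-deprecated addAll(Future...) overload. A minimal sketch of the same write pattern
(illustrative only; plain io.netty package names are used here, whereas the patch itself uses
the relocated org.apache.hbase.thirdparty.io.netty classes):

import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.util.concurrent.PromiseCombiner;

final class TwoPartWriteSketch {
  // Writes a header and a body as separate messages but completes one
  // aggregate promise only after both underlying writes have completed.
  // Must run on the channel's event loop, e.g. inside a handler's write().
  static void write(ChannelHandlerContext ctx, Object header, Object body, ChannelPromise done) {
    ChannelPromise headerDone = ctx.newPromise();
    ChannelPromise bodyDone = ctx.newPromise();
    ctx.write(header, headerDone);
    ctx.write(body, bodyDone);
    PromiseCombiner combiner = new PromiseCombiner(ctx.executor());
    // The ChannelFuture view selects addAll(Future...), as in the patch.
    combiner.addAll((ChannelFuture) headerDone, bodyDone);
    combiner.finish(done);
  }
}

Compared with listening only on the second write, the combiner also propagates a failure of the
first write into the aggregate promise.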
From 4cd2d0546b8ec843977a6b0743d49bcc0a676a4f Mon Sep 17 00:00:00 2001 From: scotthunt Date: Mon, 21 Dec 2020 01:39:13 -0700 Subject: [PATCH 596/769] [PATCH] Add "regexstringnocase" to ParseFilter for case-insensitivity (#2784) Signed-off-by: stack Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/filter/ParseConstants.java | 7 +++++++ .../apache/hadoop/hbase/filter/ParseFilter.java | 4 ++++ .../hadoop/hbase/filter/TestParseFilter.java | 15 +++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index 9f52783dbb0c..b9132a3ba295 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -244,6 +244,13 @@ public final class ParseConstants { public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', 's','t','r','i','n','g'}; + /** + * RegexStringNoCaseType byte array + */ + public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', + 's','t','r','i','n','g', + 'n','o','c','a','s','e'}; + /** * SubstringType byte array */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index 1aeaa13f5a93..e06c6b5c4139 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -28,6 +28,7 @@ import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; +import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; @@ -812,6 +813,9 @@ else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) return new BinaryPrefixComparator(comparatorValue); else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); + else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) + return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), + Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java index 8dd15be6fc63..fbedc1c0e688 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java @@ -24,6 +24,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; +import java.util.regex.Pattern; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -279,6 +280,20 @@ public void testQualifierFilter() throws IOException { assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); } + @Test + public void testQualifierFilterNoCase() throws IOException { + String filterString = "QualifierFilter(=, 'regexstringnocase:pre*')"; + QualifierFilter qualifierFilter = + 
doTestFilter(filterString, QualifierFilter.class); + assertEquals(CompareOperator.EQUAL, qualifierFilter.getCompareOperator()); + assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); + RegexStringComparator regexStringComparator = + (RegexStringComparator) qualifierFilter.getComparator(); + assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); + int regexComparatorFlags = regexStringComparator.getEngine().getFlags(); + assertEquals(Pattern.CASE_INSENSITIVE | Pattern.DOTALL, regexComparatorFlags); + } + @Test public void testValueFilter() throws IOException { String filterString = "ValueFilter(!=, 'substring:pre')"; From b65e950fa25ddc3ed093c4d9e74ae187040e07dd Mon Sep 17 00:00:00 2001 From: Wellington Chevreuil Date: Mon, 21 Dec 2020 08:41:52 +0000 Subject: [PATCH 597/769] Revert "[PATCH] Add "regexstringnocase" to ParseFilter for case-insensitivity (#2784)" Will re-commit with the proper jira ID This reverts commit 51cee0016f0caa76e4eaa9b44d45705908e79938. --- .../hadoop/hbase/filter/ParseConstants.java | 7 ------- .../apache/hadoop/hbase/filter/ParseFilter.java | 4 ---- .../hadoop/hbase/filter/TestParseFilter.java | 15 --------------- 3 files changed, 26 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index b9132a3ba295..9f52783dbb0c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -244,13 +244,6 @@ public final class ParseConstants { public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', 's','t','r','i','n','g'}; - /** - * RegexStringNoCaseType byte array - */ - public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', - 's','t','r','i','n','g', - 'n','o','c','a','s','e'}; - /** * SubstringType byte array */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index e06c6b5c4139..1aeaa13f5a93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -28,7 +28,6 @@ import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; -import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; @@ -813,9 +812,6 @@ else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) return new BinaryPrefixComparator(comparatorValue); else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); - else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) - return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), - Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java index fbedc1c0e688..8dd15be6fc63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java @@ -24,7 +24,6 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; -import java.util.regex.Pattern; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -280,20 +279,6 @@ public void testQualifierFilter() throws IOException { assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); } - @Test - public void testQualifierFilterNoCase() throws IOException { - String filterString = "QualifierFilter(=, 'regexstringnocase:pre*')"; - QualifierFilter qualifierFilter = - doTestFilter(filterString, QualifierFilter.class); - assertEquals(CompareOperator.EQUAL, qualifierFilter.getCompareOperator()); - assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); - RegexStringComparator regexStringComparator = - (RegexStringComparator) qualifierFilter.getComparator(); - assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); - int regexComparatorFlags = regexStringComparator.getEngine().getFlags(); - assertEquals(Pattern.CASE_INSENSITIVE | Pattern.DOTALL, regexComparatorFlags); - } - @Test public void testValueFilter() throws IOException { String filterString = "ValueFilter(!=, 'substring:pre')"; From b20daa97d6c4dab7b2760e88d62b1642f3437e33 Mon Sep 17 00:00:00 2001 From: scotthunt Date: Mon, 21 Dec 2020 01:39:13 -0700 Subject: [PATCH 598/769] HBASE-25084 Add "regexstringnocase" to ParseFilter for case-insensitivity (#2784) Signed-off-by: stack Signed-off-by: Wellington Chevreuil --- .../hadoop/hbase/filter/ParseConstants.java | 7 +++++++ .../apache/hadoop/hbase/filter/ParseFilter.java | 4 ++++ .../hadoop/hbase/filter/TestParseFilter.java | 15 +++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java index 9f52783dbb0c..b9132a3ba295 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -244,6 +244,13 @@ public final class ParseConstants { public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', 's','t','r','i','n','g'}; + /** + * RegexStringNoCaseType byte array + */ + public static final byte [] regexStringNoCaseType = new byte [] {'r','e','g','e','x', + 's','t','r','i','n','g', + 'n','o','c','a','s','e'}; + /** * SubstringType byte array */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index 1aeaa13f5a93..e06c6b5c4139 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -28,6 +28,7 @@ import java.util.EmptyStackException; import java.util.HashMap; import java.util.Map; +import java.util.regex.Pattern; import java.util.Set; import java.util.Stack; @@ -812,6 +813,9 @@ else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) return new BinaryPrefixComparator(comparatorValue); else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); + 
else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType)) + return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8), + Pattern.CASE_INSENSITIVE | Pattern.DOTALL); else if (Bytes.equals(comparatorType, ParseConstants.substringType)) return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8)); else diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java index 8dd15be6fc63..fbedc1c0e688 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java @@ -24,6 +24,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; +import java.util.regex.Pattern; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -279,6 +280,20 @@ public void testQualifierFilter() throws IOException { assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); } + @Test + public void testQualifierFilterNoCase() throws IOException { + String filterString = "QualifierFilter(=, 'regexstringnocase:pre*')"; + QualifierFilter qualifierFilter = + doTestFilter(filterString, QualifierFilter.class); + assertEquals(CompareOperator.EQUAL, qualifierFilter.getCompareOperator()); + assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); + RegexStringComparator regexStringComparator = + (RegexStringComparator) qualifierFilter.getComparator(); + assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); + int regexComparatorFlags = regexStringComparator.getEngine().getFlags(); + assertEquals(Pattern.CASE_INSENSITIVE | Pattern.DOTALL, regexComparatorFlags); + } + @Test public void testValueFilter() throws IOException { String filterString = "ValueFilter(!=, 'substring:pre')"; From b8dd686008f41d0e9c77a8f669da463b9eee4330 Mon Sep 17 00:00:00 2001 From: Lokesh Khurana Date: Mon, 21 Dec 2020 15:33:36 +0530 Subject: [PATCH 599/769] HBASE-24620 : Add a ClusterManager which submits command to ZooKeeper and its Agent which picks and execute those Commands (#2299) Signed-off-by: Aman Poonia Signed-off-by: Viraj Jasani --- bin/chaos-daemon.sh | 140 +++++ ...hich_Submits_Command_Through_ZooKeeper.pdf | Bin 0 -> 270679 bytes .../apache/hadoop/hbase/chaos/ChaosAgent.java | 591 ++++++++++++++++++ .../hadoop/hbase/chaos/ChaosConstants.java | 77 +++ .../hadoop/hbase/chaos/ChaosService.java | 138 ++++ .../apache/hadoop/hbase/chaos/ChaosUtils.java | 49 ++ .../apache/hadoop/hbase/ChaosZKClient.java | 332 ++++++++++ .../hadoop/hbase/ZNodeClusterManager.java | 120 ++++ 8 files changed, 1447 insertions(+) create mode 100644 bin/chaos-daemon.sh create mode 100644 dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf create mode 100644 hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java create mode 100644 hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java create mode 100644 hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java create mode 100644 hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java create mode 100644 hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java create mode 100644 
hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java diff --git a/bin/chaos-daemon.sh b/bin/chaos-daemon.sh new file mode 100644 index 000000000000..084e519321a2 --- /dev/null +++ b/bin/chaos-daemon.sh @@ -0,0 +1,140 @@ +#!/usr/bin/env bash +# +#/** +# * Licensed to the Apache Software Foundation (ASF) under one +# * or more contributor license agreements. See the NOTICE file +# * distributed with this work for additional information +# * regarding copyright ownership. The ASF licenses this file +# * to you under the Apache License, Version 2.0 (the +# * "License"); you may not use this file except in compliance +# * with the License. You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. +# */ +# + +usage="Usage: chaos-daemon.sh (start|stop) chaosagent" + +# if no args specified, show usage +if [ $# -le 1 ]; then + echo "$usage" + exit 1 +fi + +# get arguments +startStop=$1 +shift + +command=$1 +shift + +check_before_start(){ + #ckeck if the process is not running + mkdir -p "$HBASE_PID_DIR" + if [ -f "$CHAOS_PID" ]; then + if kill -0 "$(cat "$CHAOS_PID")" > /dev/null 2>&1; then + echo "$command" running as process "$(cat "$CHAOS_PID")". Stop it first. + exit 1 + fi + fi +} + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=$(cd "$bin">/dev/null || exit; pwd) + +. "$bin"/hbase-config.sh +. "$bin"/hbase-common.sh + +CLASSPATH=$HBASE_CONF_DIR +for f in ../lib/*.jar; do + CLASSPATH=${CLASSPATH}:$f +done + +# get log directory +if [ "$HBASE_LOG_DIR" = "" ]; then + export HBASE_LOG_DIR="$HBASE_HOME/logs" +fi + +if [ "$HBASE_PID_DIR" = "" ]; then + HBASE_PID_DIR=/tmp +fi + +if [ "$HBASE_IDENT_STRING" = "" ]; then + export HBASE_IDENT_STRING="$USER" +fi + +if [ "$JAVA_HOME" != "" ]; then + #echo "run java in $JAVA_HOME" + JAVA_HOME=$JAVA_HOME +fi +if [ "$JAVA_HOME" = "" ]; then + echo "Error: JAVA_HOME is not set." + exit 1 +fi + +export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-$command-$HOSTNAME +export CHAOS_LOGFILE=$HBASE_LOG_PREFIX.log + +if [ -z "${HBASE_ROOT_LOGGER}" ]; then +export HBASE_ROOT_LOGGER=${HBASE_ROOT_LOGGER:-"INFO,RFA"} +fi + +if [ -z "${HBASE_SECURITY_LOGGER}" ]; then +export HBASE_SECURITY_LOGGER=${HBASE_SECURITY_LOGGER:-"INFO,RFAS"} +fi + +CHAOS_LOGLOG=${CHAOS_LOGLOG:-"${HBASE_LOG_DIR}/${CHAOS_LOGFILE}"} +CHAOS_PID=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.pid + +if [ -z "$CHAOS_JAVA_OPTS" ]; then + CHAOS_JAVA_OPTS="-Xms1024m -Xmx4096m" +fi + +case $startStop in + +(start) + check_before_start + echo running $command + CMD="${JAVA_HOME}/bin/java -Dapp.home=${HBASE_CONF_DIR}/../ ${CHAOS_JAVA_OPTS} -cp ${CLASSPATH} org.apache.hadoop.hbase.chaos.ChaosService -$command start &>> ${CHAOS_LOGLOG} &" + + eval $CMD + PID=$(echo $!) + echo ${PID} >${CHAOS_PID} + + echo "Chaos ${1} process Started with ${PID} !" + now=$(date) + echo "${now} Chaos ${1} process Started with ${PID} !" 
>>${CHAOS_LOGLOG}
+  ;;
+
+(stop)
+  echo stopping $command
+  if [ -f $CHAOS_PID ]; then
+    pidToKill=`cat $CHAOS_PID`
+    # kill -0 == see if the PID exists
+    if kill -0 $pidToKill > /dev/null 2>&1; then
+      echo -n stopping $command
+      echo "`date` Terminating $command" >> $CHAOS_LOGLOG
+      kill $pidToKill > /dev/null 2>&1
+      waitForProcessEnd $pidToKill $command
+    else
+      retval=$?
+      echo no $command to stop because kill -0 of pid $pidToKill failed with status $retval
+    fi
+  else
+    echo no $command to stop because no pid file $CHAOS_PID
+  fi
+  rm -f $CHAOS_PID
+  ;;
+
+(*)
+  echo $usage
+  exit 1
+  ;;
+
+esac
diff --git a/dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf b/dev-support/design-docs/HBASE-24620_New_ClusterManager_And_Agent_Which_Submits_Command_Through_ZooKeeper.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..fe35c04ebbc369b322bbd7a92a5f1cdde157976a
GIT binary patch
literal 270679
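
The design document above is a binary PDF attachment. The ChaosAgent this patch introduces
receives its commands through ZooKeeper: the ClusterManager writes a command to a znode and an
agent on the target host picks it up and executes it. A minimal sketch of that
watch-and-execute loop using the stock ZooKeeper client (illustrative only, not the patch's
ChaosAgent: the /chaosAgents znode layout, the connect string, and the status child are
assumptions of this sketch, and the per-host task znode is assumed to already exist):

import java.net.InetAddress;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MiniChaosAgent {
  public static void main(String[] args) throws Exception {
    // Assumed layout: the cluster manager writes a shell command into a
    // per-host task znode; this loop runs it and records the exit code.
    String taskPath = "/chaosAgents/" + InetAddress.getLocalHost().getHostName();
    ZooKeeper zk = new ZooKeeper("localhost:2181", 30_000, event -> { });
    while (true) {
      CountDownLatch changed = new CountDownLatch(1);
      // getData re-registers a one-shot watch on every pass, since ZooKeeper
      // watches fire once and must be set again.
      byte[] data = zk.getData(taskPath, event -> changed.countDown(), null);
      String command = new String(data, StandardCharsets.UTF_8);
      if (!command.isEmpty()) {
        int exit = new ProcessBuilder("bash", "-c", command).inheritIO().start().waitFor();
        // Publish the result as a sequential child so repeated runs never collide.
        zk.create(taskPath + "/status-", Integer.toString(exit).getBytes(StandardCharsets.UTF_8),
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL);
        zk.setData(taskPath, new byte[0], -1); // mark the task consumed
      }
      changed.await(); // block until the task znode changes again
    }
  }
}

A natural payoff of routing commands through the quorum is that the submitter needs no SSH
access to each host; anything that can reach ZooKeeper can submit work.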
z?K}}b^z#RcHTr9Yr+|Pq@g?nhhA^w%Sb(q@OD1X-tN5FW-{l`OFVypb`L9GHIFa8X z_;5coPaHVn;kG$HXdBl$8|>l8TsIc~b<{HsNE?r4W7I@PbVP8b*4=vFS-H6wY&v*8 zj<%p*V*#|WNMiw_u|?yaN!-v}Xi(?Hnl)FDYXk3}ziHFa&!Ds1X2t|O%EakrN=0$r zij6^bW5l$DW8>?hsPp5R?*1d`Ypw($kznI}%kVG&0a8-eYTk7;kg{-8gIEoh#?z72 zw(@wOh}YMTz*l-0+EFYRZeUWDbv5V&yeq5UV|zRdWsUI)SBv4%RVrp zmZ(o#y#h~Jys>W?zt|hGpF(=spD?~PSyUUPa4tvHT8s?iyFJdEc?tGTe4Qi9;ZTN% zL$|fwWfkmTK$f&V7iW)D57DrRTZO5L z(qqNMiDSm}XVQq@%i4~&eofH#H>lWPAxJ?n1+s@I;Y5CUCLbw}q~XQ##4@%i>m_Vr z( zL`QRqx`)TlXG}lrfs2?RH`2_7Z7lLb57jyaO=5Cm)Ahe($9WznGA&{nE3TxK29c^Q z{S5roQUT3BtmnHR*7MeuyVj*SpERt^I7?Lwze_Lky%k)G68}|D9rs+F5HedPj5kI|hOgzWoL2rB#%yD12J1OM9 znuQqYIRMD!wk*mMpfR?T1QjAG`Ba)Wpaovg=PemZA8t`w~shOp&%f;qJr5v{01KI=3lanrr*+YM&$d0R)KOM>fmCp5=Zx% z!NX56YV}&ZE3p{qD{q^fSZc23qf#V z_Q|e!doqg-E3%M~!zuFRf)E99TYCk!0eG;?U}6neZTb#{)-`DsiOe=Vv`8m1F_Cjj zd=Iy+p>z~jv}!PK{IQBX^WeD<{3q1&Mum*?B#J0!sAVw9rt#VRARBsXH#6_KCK$#h-Yw&7E|>3fK7(Ggm$p%n z#FqrddrI(#FDe#AT@yKY?$6O^|EFvJtIp^D6Gms`VBlc=-!wV{J3Tw=fAjTKmxsHu z$TGWk`StdDx5?SHbAs%=F~M0J2yj~fF)#u|1tcaUS1SUmf?04Y;cj37At$ARn7w_T z1UynQ!}U8<%tUSh5z{^=BRwo3<=L0)*0r$qvCsC)i&r;a_f49X&dFkzN=dR($ux(G z&jJweFH{ij-omn>q86PtE9zeiy9*dh=cLRt@arqPpWMGLuAJzyw`f;|$^5U7k9$~N z=1~Itu-iXbesPJGISRb@N8;^%?fnuTSPyPMoz~rkRh9n*Y}TNPs_Xn3=w9&w%wG$c zYu%Ucfu{Goj`Bd>+lc#1M3>p`xl_${F2$%Dd;(4@7|wbIQ}%b`^PbOhex#cA^ZmoJ z+U*y_v&EW-)bVupw)Z=66G5n$O)wXETG^?x)EwWox~9 zZ;||8Um)}yK}Rig89QIO7nayFm$H}JBK)l_r@ZjBH*QcStAd({PIkS(mWiO{DemL6 z*f{p~!z>0rOeVhn9>d-gz*vzqnox%_PM9#BvKL5fmBb!9FW6rUptu8GatJbtPqsfw z1eyoy4DpmJ_ZS2O4H2y`Y(AfhAsxP;55y!`0cN8wVh)Za2)>H66QmSGYftDL1eFKA z8Gh3Xg&WKZQyjo{M^hLCX8=}6Fd6I|>^sVj?k^oIa7#29$iI&q4Db7UEiQx}ToA&E z0Lwr<$RjR@9?ua0m7m=KkuWX=fEj9D3;zcmRRF|*0a`#EI0PchvKBc4i7Xdd5?<7R zyx%eSJJ3Krq84}@UR4m;0OcPMcFZjmIuHs(MIPdwc&Q-!pFO@juuvl9JWKun~5~lX=6jI(mSoM%w%2w#TC3}v3D`s~Z z?SNZOfXcp|J&$&{Jxza4KTIv=9;bScJ&!Z~<}WpZPK6-0J{AbOP%EUcJ=9j%J<(R& zy(-KPm`WLMh?YJW2)nQ+h#P(Y+A0VZ{5B-pe&UopQh&#;X|S8sZA&e}9*;d$ztz?htt+ zHWOV2h`~Si7eKt=bAx#8F$BNwIt0HHBm1cK!HWP^QXLvAfjMFG&4f&l7%w_T3a z=vxnW#676lpj#1`Zji3bQgMdHp=cH&6kw6Se~H`>Tcw zwqK7KK!DBfjt)!sLDd;>Yj-;`2m}xh65Evt#v9Or;0thr_bu<6*_8|b4xHJ8@2A_{ z8oao5bBEhYxit$G+w%$j_J@PV4eG(?hQ%ZNAnRY*rDBh|)w^98tgr{>6$CVa$@L=y z@AQs9$n}vy^n}PE)%3dEV)WnaIR%&OK?Q%_x&=D~(+L503j^@7zN9!mgNqmB%sD>? zXPn=9n6tiWa?T!Wer!_lLeISO8y`#7mz+$=x6;Dd_u^00uXrKWit{t|$5{Md%&DLA z+jqR|_vyE0$+ybFnctH0n}JLT-tvDMztXcuxbK36Z#d_-?r*q@$Cdx6=q;U*v1Iuw zF8o(>&&T=s?Z)|8@O}FDpO^Em+%hF>aKU^7+*OVR_{A#j^QT{y(}asbMWqj+d9oE^ zIL@r+sicd!4t?(;#s+@Oo-{g}rlS^+; zeBHr3an(aManmRHN58Zm%Fy*P+57L&i}ilRMg7Qy>a@as3EzwVY9DhmkC#QD!-!Z< z7Q1$AC46%j;*@u@C1h=+r(MiBfQU6Av?qGC*;ib&BXUUMi*rxV&RUImZOOgN?#KNI zAv4D#HGDlAt($3!Rj}HQZg!^8oI#)uX^msL(Fb6@+hd)F^2O;DkQ~%iRn&D`9va&0 zTQsJF+7tCCuorR%V4Bb&#Ir@tVVd@NVoG!dy%C_Xyae5nKT`<40qV@r=!9{a)7;z8 z=v}}qL)aN+Rc)hgk(-%eS-we1*;Wcj-9uKz6Ya&>vjHfUng~_SxX%_0-m>FkAaC$? z;!*u7ezet>DfXSzk9Q~c8^KNoEa6^AGr1&w38$mH_l@$Do11l* zkIMj?yODi6;%worzC5`@0b6E#u^PcLcZjB)4E6VpQiZSKL+}ec0n>zsx}>8+Fh?$r zC~moJe7#+A0VoY;kg5UIlv$I}l8Q5%M_235U!g&dJa0gsT%YMkt<<*YE&n=lA{}@( z1+I~{`%msgKIDB7`4Z)JC;$C1gH#F$3lR%83qCMxVwbZ<<4degu1?aspgaAR!rqVE zuXAS`uZ~J^26f!$ZKULiL-SJD=nlp9Kr`C|nGTEvkwAaf5y2EEO5>j2dA%TkF_-*2U+g{*pl~GEhuxl=YN)OWBm2mjHU^wl8kZ)zuD4bLMinwHj6M zjuUWy)UgYJm>y8pM)?lJq>XtnG<;oUBYV1c6{RTmhCWk^>ynqv4r3uaT1A5h%Zu!k zgL{~nkH2NLAky{4atr^N;rfY6qgm&Doar!NI?P$J1a1V9-Wfd5uLggRpiNm?;)y*3 z$A8ddY6*Yp!M5{hZ7JJOeFnkF<&YU;u)!~Zw1;Md=q`J@#8K8OXAWNOM88%_b3A7! 
z%muEV30`@;#XufUH_4M09HJjiJGlR923mBfYMWWv^LQ!*X%pl6>j;CyQ4T76pqPhu zhs=cVkSi1@B>H0NP40~N{VNqm-v2Y85}G~q=ka>FALW5Ta?H2W%jr`g<>8 zMu_Se^ezrtG2&vc)V{1%VU_k0@e99`7FNVoFEV?{%Dh1?cl%FV+bNMZ&<;Gt$#7&d zUBqYAU^m-wL3UZlY+fxLB;aFnK zXd5YY*`kLJ)YM%lX1AF0(Q?Om%e4Fx-#85MlI_47N_E9DrA=?OayJDd_SlG;J)ya} zr9@1%)poN&Ibk%=L8SFaU7uT}Wu$^QFPQgS8S47XV}w0I?X22)q?;5fka!4xg!Yk; zlAIY@@NA@kbKLTuB!l9ODW3bI4o<G^ZI%rGiB!asDC}}b>Wll7=Fz}U*hJp7<<%KM zt@)U<@}~L>2Q{nz!ejYpm8PC8MuGZhEsP*R`zS0-r!Ip{CQj!65kY{3Tnq!QTe%Pn zz-4rDWGc2RhiU>b6l;0}q3KeP!eYRICKSlsZYZUNC$b56Qy(abTwZBeGH~H~YG=q} zcyI|3(l3#XmEb8hyq15L-NzMa1aVn`+mR4pH7qggme=u_#40!9`Rl;()NnGCPRh2j zQUn@A6Cz}2Jh?fWi5@IEGKo+H5jB?{9&Tiquq>_5fCXa>afvVLHE9uvejIW< z#aqKMJY{QOqfWGHt25~{gfIiwqTXgB{jkpDw9xwWlsyFG+Tr`T&R=Qg_>k5n0H&o` z3tL#k0b_H4ReOdNA0Dmrdlj>*x`;!xdVut|HLVTh*bO3{sT?;Aymo7a;6%Lqq^kgH~;#PBt9 zni6Et-Q?g$A2&@Aidoq(MNlS5rC6Q=IecC3=C9{Mj$os5V|`ze${elmp<7D=ag>ZO zrh4|`C<`q+jOlEqx70WzNYG_J7;>Pz0=+N5*ySP;!^k~LEpcL^fWp8>fHO5>I0`a7 zLj3S9V#oR3r*TCdOiJ%;@^yZRHd2dt7% zOtKkk*u@$%}_1No#*Q6kiGUZ9$;xws(GGv*9Ms4Kr2Qa9H3{#<~nZDRk$fr*9 zFu^h&{D9EZA{%SO=F`AFKdA+xF*Eol;ut1Mm{0!Xa}#U#!;u#vU+3?FZH#57RXyGm zR@sa;->z#h513N_Vg+uXRPfuK3+Z>y}{4K&(`9RESsx= zLF&SxXp9E~dEdfw*9eI13=0H;faM>N5%s?4_W6{`tB@S_ob$}hkbI}o7EbX)$&t7Yd4yG;tm*j2^oAgfxDl)iTm< z6uH!JogG_}6Xyvl=RMDrKhP_Bk}HcMEy!Yulwp$LQK-s#<271;srBD!XX*Dv+d-Oo zy@t^ms^D2<>Ax1DM8r13($pEvSXtn<4{rlXDw{Xpxg|T*xwTy8C1>ZfnE*|g?v)06 zMP{zH<`Nk5JHyS|QR)3rgC(63r$Sn|{u_(K6V7a>&GA?Bt|l305=SH=*# znkHHs>MQEo5v3*xtWqe($ee>=*S!8_ai^pnYK~GiQ@7MNb$3yxNuwq=5Zu!v@!KMH z2G#i*CD!M$Arni5yV>?1!QniK)`5Ey4%oAH@3Y~;fb*#q!_`exlu*b_<*X} zbxFxB+6oawAr1W|WoRd}@C5UwR&A4_0m@Jfnzpva$Eyk3wbt`1xBVRgYAZ(*VX`EW z_}LZ@HE-2Un-@}NmzIQgSG%`P*YzQX&TLhy5}jb{61f#Z@QK+OR(v={aq+CDV6&DQ zwkwn`kr{!RpZPh5o76`7Ej0>bd*|NGz4<|F!DJ52pS!M~EgA0s;{fL%v4Ll8+Zyj= z{K)K<&HY-UgX}Xq>gsHR$r#I6oe>yAh>3QpmDR{-m>y%dFUnLtvM;lbG`&7e z(cz800*eEL=kd!{68adIQ3&Q0W(dquKajvB-b5wAl5yF-iBuB47J^m<;rdd) z`wn}X2VY?xj_gQkJiE4r#+qPq^L~_i(110{=;yt;Ekp^CnYyb6MY}yaLnrxN=z@XN zNs8ZJ=$xJTPVpGOW0HHQn`}-iL5+_?4O+s%o}Mgnzikbt>Bol?z1xb%8Qs_YyS6PS zOF8BvMhoHIl|AXxUI@Ahl~DgWNd|WqgaPG&kkB5C!2(gjds^PG8KxMPY^zOE?|DF= z0)aAEkoiB5tZnjuK7HorxDge+5iWLZ*~Q+1^MX!T;-ytH?!Rna61Ylb#w)hF9>o5* z^PN(yB4W-w!R4%xjUt|s`@}hIq1Ac+JY~E_*Nmynr5wtRj4y(bs+Od47IO{H?5a$m za8tO=&8y^*_i7rs%~&%quoOaxO3lV4(wZhQ>Jh2(D&Fymd%hfBt^s!dSVZ129{0Bo zyZxJa>3bLLSZ;Q3HDjRrUKoN?6*28xJ&Ky7n@3xu=CK|&o0gY!SFM+g_e$_f(GvPe zO|!QCCy>vAqgDsNrqf3QG>KbKJXTNM-fyW^oYKtAT87%uxWfZ*;Tc)KKNa#fDTl=J02<;z5 zLFas}_}dz@o=`OYMm!a_pc4-lDpyoML3w}|k7|?D^-xwu*xe#%qCg+!26>hDXG$%h zVpSFlkvD4nMVtH32HH>Pk$25B&8;gOpaM~%gsiIwVXIC8ejOP=m@)O&ns8P~JFh zIt+KRJTNdq=i9>xY%1{0m}+K6QTA8()58U?z`$SWva)P@Z5LjDYNh0v>FDkmXc4aQ&#WMjzRS zM7CO>tjHu5;-tgc$Fzj3=Kkhhp==4!za*oE2_P|8pmbDQG%jIE*5eSj+*wAI^3#G) zTu&m+Rl=syveK&3uF?W?BL%Z~Y;sD9e36*SUm%wxu9i1>SnW_@vNDWe9)FOitEA*a zQ&U@TmpZ{vt5k-nq*z$arJbEklsVEcpBNVF7N(iapinW@L4zt8i7H&rfk#obk)QP~ zNKMk`haO2Nr3wCyw>27~Yr*er!kfVq+d2wmoB~GhG`O6`DlgcYxBbolfDs3xF|Xsg zz=jzMY`(+jtHEUq3?7xP4-^BrVGp+{O@KLZr`=TNr&KiKk=|cEQu32b6*e7&H?M;f ziqPfof*C6P?*7ZclK$8JjNPD?MaUJZ3+xMRE&T>S{ypkT@|TtDn8H1@$FR3pw^dQC zVw-LpNyp)jtk$)+X73S%Vz#^xX?#a8S5ivb$rEClA&?__%z>r>c8g~|<}wCCVp{oz z(&z-~l5u1@rdiq;F%v_DtQ<0oR(84x)uO|(Q89)+bgiVMH^~|C7RL3zzZ&;aB&EI% z_w5U+CB-Hb3*4nPisdID>&-+6p&Ewfkd)Y_=^*iNpsB@>V4JjVilBKd{+MAk1YdJ^ zD?L#?cs&8y0{Mf0+s=7Fjt`v9+inG&i*9>XahBJIlOt4`1TBG^H3#2v?d3Y%{?1Sj zz!7Nyq@}=nTJ*Is*QiO9W@2E-C)taaZwDW_wPMpqho)MDHgBAV+Qw#Gr=PTIw}mmU zal>&Ebq#Bq0D*!e-deuAi%1-}4{>os6>B`VXi$(w8tiuuc~B7WsA$_idIHA|?NViu z_z-!;U{M9qpM#K=g($L(>T?UKN_G0ZbdH`2zr;hI+lz?0hVlx!bk1`~13AxYraQUs 
zi*PH>6IN8ftC?&{hBZ_6PPfksa7MVqD2{nR?1A`GtpW}OR44JtU)COe$zYsPQP7YB zr&*hM3J%XWlDhZ;BbQtxJE z0?FQ`;7CcpT>d`DB47H+Idt(pj`!5r%-P=8FE!Fj4wRjD8SgW;)pri^op*)q-=#vB zluTGmX5uO1DVWuXH7wdE-K;VQTZZXVHc}G}#~%0I9%=XLpKTw^CdFUCUuqvH*(lvx z#aqc}(RAux#;=Qp`}0G;kj$5ED(;Q0*d6yJ>5x&d5-qe2h`H*R0-BiuXzfwbIrwER zvqEAAnlTpfAnX^>Mk&YgHMZ?bVp3gvb;4s782VtVx9^GiOrh&D+1r&DdL@9wUU@A| z-rOw;HI*pR;}Y+4QHr>!u*k5tYU_XHBsi4}4QK8M6%IyDY>eT(+M)R$)8|bM3&*f4wgh%$cSLqbwQ03vZ47ixo@F1yI+nO>if@_6 z& z9EPt&WFF&g*{}P>%DYL!ua?~#1@IpN&nzRwqbnJ;DOZV)}LSYv9FfNUlaXMvlrz zrbOxRN3JZ8tuw;J9p&Lw@$Q$WhBb-1icey<%uLe-*JGp7PDu%nPk8pZ35iM>D_?P$ zQi0@I>~iRJR8xQ~dUZ^|QGgqJG3HbcKSx=)rq*C;MEBZk0>irG*=iMC3fJS2W|-Jp zME<=p2=K|iBuCN#R_n!Kv}4I83Etk|N$c->FGjT8h$C_~!EEC1&<#R2N3KKkmw%OV z?ad}f3gyWb)mH{f>e`q^Kw!jT9K3BxDuE&joG<~Z?F3BzZV<+l61d}SMFh;$(YYq` z!sU`Da~KKxxfb&vnsi$M7%Ew0?=J(&(i>->29%|(mj^tE~~YY|t$D{M|0)el^ z^8BodhFZv1`qN4r`Wy;+Lqolg%CLE-xHmpz<|U6X?})XHDh?vz#*0-@Ht!XWZ256@+HGG{0pvW2l;D zV65`EIWNJLXu@T~HioPe`#+ZMh<7*YpOdx3vT?3H#WvPiMttS42Wf`j)@nNM%N>S< zC?zg?$FY!Zl|Lc@T!%rAT6V48j?UWYRpx_H6L@xhu6=4X=qb}QGvyJ-$aJml-=Rx4 zdg2fZjmg#Mh`GcY&5`W;&@q;5kfklcUP^-d@%UdZ6u|bUyWHdVj znOTww(DNhcisl}@At@6{tD_IvD1vdA_O(xC`8aD?=^3HW<~BPIJub*FxzpF>Nt;mm zL{d=yTHeDOb{xZ6wMh;~$Qa{l5H=^W7=p)J;HRP`DN~DiA!>D09(k&EZth3R{?jaK zbFOQQ(xuoU<`H<2lmu%M4`Ie%noamOZjrFO0J}x=ru5iqEPEx9$71o9(8N4!JV+MN zxt)ETFALA}?YAxSoReJk+oAI*R9XhRi|e`XFG!PXxU zMC4K1ge&KMm*c=76!T_XR*G|35I-Qr^Wm^o7=(gqMU4*8wu@s+x%#*Fe{{5a) zJ%?0_O;t$Ois#9hdeY3i*kI|1hkbbtq;ZhMDX%FP=c0;UH~q`x%%si4O>?#*Zpgvc z&)%<_tvbW@L*74Gv`D#8a0Wr@Al(u~c7TlavD>U%OyxR(o9KCwjx`=C(W1R{zLp0x z3V~}eV6aHo(pF{LH}b=}Q}LvD-4wcE>ehDdz9t^w z%$)3>+@CzpWR(be6?M&XY_o1J?OHHUvx*Pcpj7{XYmrnp(WTP1(>?K7ZJt`P+PG4b zPX8zp!SL-r{+m7?bs5gaRBq6~V!0{gK1(oTt;$oXj;u@#Zi1!o=Oq{QfPrc^8Ny34 zP~O@Y(#Sp`Jk{Gmadz*CW#!ABevNa_{BUp%VuN?@n!(G1vXQ{(vYQ65Pqmn zNy69sgt`{x5-C;Z}+32%W;szkQK<^9WRT1W9C%uJkiFI?EM znV~H$v@;_sAtTx{uS&#vu4y%4z8*v5;9q{DnXB>qp7tXBCdO0D&uaJ}%VoO*c_=SD zGFu|ra$bs*Nj79U%Z~9Fk(9!e31DkbkMyq#V`?dgNZXT1lnYItbh zaLZ6jQFCxPS-+N>g?|&Zea?wfNg1$qcVp!??Tz^2J>#q+)A`xF-%e$^V}I4}VRggN z5)wQ|nDO_;)skbY`}0x?_iZcj#jfvedEb42oC;Q)4DLA)bVVYl`;QyyuuO7Th)sLf zxUNyGTr!R^HIJ0_tjxIgYOB`fm6yUx!XC~R!2rb>Rsq9S=C$$yW4`Q?addi?90wdb zM+qrR8VlxdbVx(kb|K~@sa9gyl&}mgRs(WJmxY3&>4I!3Bk)*aapebb3^HjQna)5t zMv@8H`}1tzR<=n4K}Ls1*VMpjR#ghVctn;20p)aNQX8?86lsnH?0EScwn%7+v(BNI zn5@{GgFFr{%Z$Q-!p~w?zRxq2botU(rsr6FwH8Uef0#2U#F|26(td?sK{-FYCKutC z>1w%pNyjGiP&>*N`BgKkI+6ibX)=*?NVDZshVo%TIE$s>&y8_%^MvJGjLlqEAC4f# z#uK%OjX>&O@Yz>=Mp$imCy7f5xIvj7qHKelo4??<&{lYXxSZZ4;)M zYC1Bb|Moj>{PvgOUbFtnnLXCaFili3QfS$Z8}tAjJA*qSzMC~y zghnp@=FMX0Jb|ewSAA)6HO{@R2Z^>3XkZNkO(7ja8Y@qiOFm|XLWp2~5 zZv&6g;-FnvByO7sQVk0a1vZDdGV@k)M17BKAVJY4-K9EzABL0yl0(5+A(s=%lI>{ZJ zR8Q%Xh>$(8KcDSeyl^xTfiV_yBPR}^Es%TPUP(&A**ZOYoOPXXfa5tPz_mYY&s?7$ ztk05l+?2@Fb(2ZxI!!5ogY=MDbjJ<4^IM);7-s24pO1P3OQ#J|P1v_x6Wc*AbY-$Dab*mqGD z?8ueSrso>>I$CV?MF;2XtCE^wZxqbxqGr5(TCf?b$pLHy$L4(ba_78%da?g7KB1-- zCH8(M@9%K!(W+OZfNm1|fdJtb@R1{IsmB_tAm2x7^Yt z_JlQP?(tpTeX5KO0x=d7xmNSWg(19!ZYyb&ZsxLq)i1 zET|aw-NuUIbR35|rK&^a{bcCmyhQiov02W2l7rDn0M21M0PC)%rcdWf!qMVm$x~o(?MVliwH1qhE5?&{YSd|nccJZ2vwRI^X8r{HQNH6M63o(^XWO=*Ugl0PhTM@~D&28K_oF$?wwrlcAo3xJ zUT7o*dDbAeCre@qwW<|64G#}nrH}xVmZOcTi>XIO6170xSsqOJT1s+8e9TO8VPdm$ zLd=Zo(cy7u8r&Qx{PJYE;y^7Y^gGUQzCjbJ5zFwUJ#!^29tI689)utI@uY!YE77C? zJ3K8US-mCV^T{m~`@&mv-)BrQ6ZyHfBVB2f*+EE2k*^c*_WqhQSGQ{6WRuwuppdrAwy z7Mpl7@+o#WH>0F!g9iI(-i<|09VS+`@|OBoxExbCP!*P^`T38xb(dE{jwvr^PyN|? 
zb!*N{m)T0H$jVoQMIFUy{8ls(Oa8ZS3FkA@ec4C!Cl=W^;iq&iYt62Td8A3GPiV!E z6WJEEZSoECBe~rIAJh$%9TqPNFA5)%X7gCh{K}x&5gK&Aq(_!})uaXs#bO6WBzEUv zX@a+n#gzr~6ml>g$L5%g)I1DMt;@LK1n{FEnwNV&G>%1+m<>DitS<2A^c2THxs1k+ z-wdAw^VfY!Pq<%0HVCVZ;~0*cw&NISx(gA)%7`BKk4kyN3Mp>)&0ZMSjR(MhbsOgH zX9wfZiTc=jqZ&zhWpuTOdQ={z@@%e&!#2%6t()>}liu%Y^E(?LakjyXu`-j!#hjvR z$B+T$YQF59IF@NuXI|a%RpQkMDOTY?NF(OkEVV$5&~R;?;u?A~)qUc^C=s^E8qKl8 z7v=dIVFI@&H)R%ZwtcKcx`*6ht4n`!esMzE2~^T-q5DC1_Y)fjbigdU^PPBh;I*}Q zZT!}f$6o=qM_FIRQ+>Eb{%c666av~W89>#{l7X7GZqq5tfb{|98A)J}%MvO9W_g-d zvFI%!_5%c$dkNz6o6 z%5e>KYoCWr-_|tOA6JvBKNwu2G}l->ZYUq!_^iT{=MtUcAJh7O%q+wz*Dk;8Tlly< zZk{Y&EHO!GnG*~&U^FtKTg-S;o>Y_3CmGQ_w!tZ8P{&haH?C+a3Cd*rU2~EWxk)DY zt$>I2j=SsRejN#$aSP0wKmA5>!KqfltrzUnD!QvrEKNHHN|uSZ$SI z63Z6Ml6Lrfk<*z9te{Cbwj-3JAx7ii+HuNPE~OWjiY= zJ+-1a;DDxkQ#ztmz{2t(NIIY|kzXRS-YCU=IFE&F1j%`L#Ibu&c+=3eW445zmV}3wETI=|MJzDg8OBg>HK=U zSA18D+HmL+80N&$XVbv-cS96saDXB~meGAxIHPatET*lERoKUNdP@jEOJ;Chg_U`E zwTe zMtgxx{}EmHM|**&5zQAnyn8(P9P$!o{U=(QjHFu(7GrtRq5cSw?roLE@wAQQ5LyWv zU1B@T>nGt-?fS)=WOa(CAjcCb(#Q&`faCePaDdx~?| zyT|5-rWf|n2FcQ|%!_98%2WQ1eIa;38lWWnKNx%G=w5#C-?!~@9hJ?s2){&{C5Gm~VJN!FSq&+DnxkgcqaX_|YZOljhcNLlU_ zICkc6jLB-_M%#mVD0RFonvfzS2MA;4x$_=Yi~vpm&H#4)gm)ocYQY5FzB$?CLFDYFIfwDwkXFP#R`%|op;+uKW8V?t zt~oLVE#N|UfV(+!$`im3*?nuyI}sc`cZn3DL}aX1sc+=?mz0D45BqaX8>egT@=4Q~ zwL@}k5{k4Ur&QImCp34+=>tzI5Kbw~iY@sUWS-Do`0@?TL{u82%n7}}+mPog)or)6+ATE>T7dN;w5+3#N0hwt2IdE(u)&`!~$LsX<_V&S*%Cz0;T zx_C1yFCwt+Td9M@0PiX|Fy6TU375pYqJSu~JGFjt&SaVFe+G4;ZQ&U6>JF-V8n#t! z8S6Awr3q)I!ZSi8Cmwn`sNJ#bMFDYckZ{A5PFOI6WBY?b`H7{_*r7rp3{sZW<*0N(aF~RTrHzU>%Pj3#`XT@YrR6%D? zJP7|LrT?WpLm4u!EXKhA@l4o<5rDMY0nOG^GEqz=3FyxEPqO~IOA}PHqGDV?22gBQ zBfMa>OVESyjHa?5YN8hC8aeiiptn5n*@FD!llSYLUB8x`As{)fAv@gBPSh7{bc(Gw zkgmO=k_8*wVgE#Q1|{xexuxs2sv5WjoG~>lv&-dUEmeX)9dJ|_(nW|oRcGaF;^HYx zls|+04G`F^YlMe%#IcMR4SyUakMKCI9K!S?`PoyqC}jl~Ajyt~C?l&|ua@}RLK>?NZn)uoG&7#y=N4~R}#-ZO$T^;`Y z0-}qlUueC9N`C$00ZDdIw`_fcf4sMfdz>06wq^au@cdb!bz%C(@SkO!7-VgdJ6l+| ziGs-!)WGiMYseg^xP}sIXG5V5ohnW;Rbbh2X+zvOq;!St)Y5U_4iJKfpa8$TntcLv zo8mRnBub0^^5i`^W-A7KBR{ZVoMlE1ZwszGUIN*-ioq9rrF;2PWTe}`&Lq`v5PfCc z^(~R~?=4u_67ZRx4OK}-3*8Q9OvAiri+k~v3(T?1m+Aw64}JXY?X-!m_j>-bMY!Yb zv!tuesb5fZ6#w``9wlE!ZeXK1`J8SGPr%A50x!6eiYw;y{|w>&kXehn!>V1yjre=9 zZpkKvx~Q(~I||y9yY=h& zy@L+HZgbgg{8-5GqHMU5RB3DDsX#JUj?5LhEf6mx4(f<~ZcgYqy;9k3oaYQHCN1j= zv`+MZX=P#thXL%+&KthRL|Mmr2{O85R5)vWr*sJt-^k#-^O1vSPIK9XgNUuUL#LVN zy#dC#TDgU-B~IeTs%z}DR+8`e&`&XoEIoSoFEn$cTb%H6hIC@-1`;CBcoKsEpOqA$ zi$@V}8V3RgPspUep~6nuNPpY$+4D4|-LSXYM1+Ra6?!n>G5!jDqvhH2Q9evquFF>S z*|J>z!Y`HBz_e)a_)jUScUU5WT;{XUrFsWVF{LekpZ>$)$wWMEW44pu){;a4%bsPA zj&;7>dbPjQP{N{8(N@GOIYLl$%2uFB4q;c&5ln&A>6YvmF@5!V6QS~^f%QCtfT`IH z6me<-JP_Vg1}TdMfw*uJ@5r*@{uC3Ppb)c}uwt&}DrYP9Qhap2#eCntk&4PJ*|lbZ zpQD4HEtd`u_{ESRKMh9f{`gvAtczQ^5qS6f<1ImzC&}(#g%0c4s0T0CX|J8t1A2!;GuYW}}>AsCHJ z0~?K`WY$oOYk==mn#P#Ms6&^a;;g67!!T_`)DC3HHCBL1d@KabR5nzQxqu5B9GQ{r zGX(o>kY-aJs=VKrsX3@dlH#+1{x=J?5>q=6Cik2?N|fCAIB_@nEnAMeEgOhKhkOP2 zVmw-f84Vk}ZZ%%zY1M}9ueLK=<$54E@g_3`*tnU(z@MMm_pTp~OmwfHw*LBb!wF-~ z*p*;49tCpkgtO^z?%aeE%i6MiT&qHuy7_W@EIk;!d}`C(aVng5^v_bgk}@lbpAG=V zfg@36ichQZ4coMC6qaM~`DvS<+_TFWC_*jWJqU?mm2ICo4g2`C1|<)ijr;Tn%IQvA zJNpk<+Ojb@48b@91SzH2f{?D{LIhvO#-^rZ!VJg+O5#4DFn?OqyWGqhqwiQ)NjP@) z_*c~Wfs?VcLNfZ>xRIdXeepmZ##>N-9)3eTHOB_f%g=Yo`d5YvTtc4Re>OfXCkKwL zmU9#P&LiJh;z}M3n(pL51tYmv!PV6jVPF+#bp-}Az^5rxdHRLXpC4ILR+#D2x2{y1 zd12txlA($NpG6Yiq-8o9MhD=A-`s{Yt)nUJPpi*zONv#nw|!b0)kQR7WhRWEKLLmAq)Z(VL2%lR7f9g zUD!uRfDDGRE?1jI;&hurnUbLbAC$C|6cnKrh(hA5?<;H`!@r+n#;+Iz{P>cW|6X5T zcP1SUkCTs01ozV?`iB#BqV>q17%&sruxjrN2SZZ^ZVZ72BUfrifZyvXDI%aE83Ys& 
z6S+oFP*DZN0$M40I|$HAV)0p5CP|p*75}Heii*lg>bw@LybhR(e2|qeUDI;22?ha8 z%*2k-w3HOVJc{lrcV|UH0DG~oLS$0Z+_i9p2}W{U=3Jq8TyT9Mu$cOQ1qS5BV9kUI za|%XY1CpeV@?X@z3no9))0c*PhT39>x-?0%ykPYb5CJu&Qs0Y}dSin<4gY%LRr<}J zidyGoBw!dENRY_dVRAZ>1XBkQhq0=rGV`MpDc(jfjDT$Wpy%3@f3Mn!Ws~RJhO71W zc8`_XZ_)FXt{ZaBZ;s4=IHo5Im{J0=297_1j}HBoV{Z!3*bMo?%>m9eep#(JBZgmELko!V!a7+68KJlA=Fu*=($bi9 zSh?Gg4p1YoG;x!EPo(VuzTU3LC66n9VD*C9W6_7zeZHmMbXdNTQMKvKL~?aH!LfMn zQM{054f=Td8Cr3J`{gWxtRI~!nFW&V?v z@0J;$14G;>hl@!DbIuVk4fQXAL-=M{>E=41>mwq#zH3IgAW^Ni(ocw;0gb?>ODsUl z>~AgG(T*ddbKnCcFTVHhhLXMh95925rSp^^)71S&hw!9J*bpih8EZ1-;B7?5X zKo;nFcxsjkN{wyD81pnD!0#aLaFO&+{lHxP_x8?ynU!5@{MPpMJqlH?w$6^}C#B}4 zflXjp49cN4$iHC;14vhD8bJw0w<9$J`NiEdbJff#T6G`b5C#{0V}j&D1)G%^ny;{% zuIQCtnRJb+h3~5Bk}Me~E$&iTR=69c0&TAwq!z9p8&*RZ;m1TX0z6?^VQ7hu%9m`~ zQrvajSTuTroDN%`I<&_^GcWC2Ghuj8)GZB@$u{Pu>Ux5f)v?%v9!*4MGZ2bHOyn*p zZ@;`aQN1c0*`299mwK@~XlCX=H(UV0lYO%;29&MaUAYT_jACqwmYb3F&c@XNS)qe~ zP@7w0d`O$Qyb0a;5L832M;$IINPag$Iy3S;aJt6H)9dxg%@Wji*h;Wm)Zg|R6k6LQQDL3Mkn(;mv z@!l7;yQDY5O``>I9f0NJv0r-;zLe$X2wKFZ3{&G&@0+Kal8Yf0@yS#InmRYWE%mx1^MT-9`g-WSRBA+OvLh*lsNR$e(pV)taVjv2s#D9V%3aOM# z@Sj-xzbQbJAQFfIk^W*o0;B{QPn4(lBmOABF#~}pXhn4dA)6^9MQO7%mG0nj!0sV zgrfp6XE@34gaZSs6KWPS1(d9R7|Z_+dW-k?7I@1akuSVl7_uf66T=pc#84WFCn{0= zkx&%IqHxRqgiutb@I)k1uIN9BDL|&=O88G$Bjsf6|7*B!ulx^vdD!kqmPrd8%irV6 zH&`!9gO$7Sp|!!;SYNC*|88n`tYGzlQL*&E+ObC7Sh_$ym`?B1)2(&OK*$UMAr6ou zv>0v%-cW@_#c&262*!b_m6~0enIwM_TqWcs2jEWYM5gyh9HQEUy`@lG!^1?e%47@0 zWaA2~0<>A$z;_v=EkqEOEXuL4hFuHG%qJN|ELEEEU~#AkenX8ZgPDi}w-@^_Qa#WN zSZC4!p`vihWvU8aQG63dqqgrVmSxy(eHO28r*Kbg3qO%6_FxHqkd9J6`8H0@| zMTcNGF}yXKgK6|=PD`n_!y985BN+R$0@ED<)~g!Uq+?91pYO#C_%Y{DXM{Vtb$5|O zcy)Ir!*ed6W^b3NOxS}nutjqEGQ2=#wlzDzDe*gKRPx!bV(?E%H`^C#s@Qd@p z&B^)8C*;|1SIbYmIyOc8y^cA=7>0j$JF{`-Te~Xi;Noffkp6C4XN7owKdWy0yos#i zL;bhs330{d_AdQxEd|e>{uTap?q%+t-E{x4?$`aty9RrH2+M-F>w8w+fCEP)fbLt9 zlUEBJ`y=gL)o(=~b!GGgI%V9G8tR!5PQbF^k#KA~5P(Ovzd-F7u1XspD~(<3R6I8i zLHPq%&QJ9{QtA>&|< z-slw5!b>!WtC2SD#cWIJb73!j1R>lyx3#hajys>nl8^b5++2}=g)X2C zm2+$El*62aSuY!-oMpVutYuV8lQ3ps8Ld0)t5}zoSzg~$la`5LWqEbDUf%8txD6Gq@vkiUN$tv8G&qa6P;*gw}kz^V>ltE zWfZn>o(fMeMO{K$YM{|Er8mTU$Kru>Uwbtbg9eGVU*D z&dMkoT3qerF`ry5HucK$b-?w#@eM&^1ZX0qe(yE!rPZWRtJ9eqn3`}37*`os8QWlZ zY0D?pmA7=Dd8@J9MDI75bE6jbkk5r>AYOoK_GcQGLm2w?7M$5Hlg`Q2D;L;Q))lO$ zDR!9fYu&J7IN*z`yP#UXn|4%iQ-@vZt2v{mkuzS&V_w~XNw22Wqe@KJ%OO=R3CY+@ zt!lK@7ciGL)-?upW7K<5^F7#^mNZqmQ~#zWn*5CsFpf$a|IvtLbNyBG08_myml`gj zMKsxbEwNi#h3EZ;@a~s!TB((8jcTuvfl%UXns9?X6LW*3zlCX}$fQnrg2CN4@G2T1 z z$I?%fY6hKeXLd_B#KD^|E?-Xwxo$~Ajj6J1ZeV#}r;gA)Sv&KGF45woJJ@chlttij zVudAkDYkthn8bkQ*Kz(ahE@wXqkljNwo^;^^O#cI1DCRYW;O-yQY4NB<}ri9S~D;G zNAEmV>6<1Ix2B^N)%TgMi z+jA^F^4(ae!y>BqH7cYYTAV@Tl&=`svfrx+@ zLXX9c5d~rsWt21IAmbzh{aH!wH3W*S6l_>-TUQUdtIMC z=h~eeec5hSbokh1a~-Q6FaE(I0LmkT04f|gMAtFrQ~(x$(a=(6wh*~I`PbOncW()l zdX9Z8r>XGxbV?6g^*!_^j-5t#`}|5}kJRT1B!I(d+&|SWnb3a^%=2Pt)SIDt&qHCtt<~{O8*&+`FqLKT3Qf29s_RpL*;A5>1<}F$*M9lt)%-GCErac`ivdG@ww9L_HFSVsLj-=lt#O+l^UvVkG;Oz zZ!5Wd!D&Nvd7qPyKCRv0Ymc;Er47-bJa>s+gT<%+vQ?%Rb9NADUW(wuvjI%R`$>CP zNP<-If+N5W%*zIK!^481_Teo9|R|18sl|PRCV4A!H8i3ena`;xq}QC{u;2q5W|E? 
z_aMrIaK`OXV8n1>g%L*k{;rrar0#NHpjijq=V3YUz=&4`AlA$8dSgUrKo=sM8?fww zh9Von?2%xEX}}*7>h;+(MaTKTz}oZep_GXNCh8E+jK2LX8Gs$QWJCbKNeJdl0IT-A`2lMC?^DZ zvo;vIXdj?SL%Kbv^su(XT~Y>o2UMEyZuqnTp`m|u6s}lHeMUo*29SG)_K16^bttz| zq3Zp^^}T~y8li1)sl+wr?yOZJ8$fA8U*On=sC#6g%YDV+%L8XC`i9I^f*S+&)H<>5 zxH^bl@GbqS;mgcjz)M715a$SXLf+`MX712BG49kh0$s3dL|brK2-gPg@~#^eL( zM8pqyN5BtyN5l{NjSx2wRL?)a=nioU>5g$L87kkOvx04p^EC1WBrnXo$6N=0tGN>A z4x)vS8}uuDc2ICA$sXbsj049DS1%Z1kCG$s1!*uXAN3a1o#Y9t3o4GNXXM7ko#u(^ z12~qb2Y3|Wd+bKm9pskQo#_ec12RYGgL$yGZz0@nkU9LiudzODS2y&!A9AR7;6B_h zC@I{}+#O3#z#CsLx*HU~uY85_1wl{fBYc;C$8;!eH|2+Y1lbk(LOIChj(f}e@@wz- z27C`4rZ_|9buLZte}--t!IJ9{dg7UfhlP zP%cKx;5I_xjoT0gpycvf+o2=Z!PDQt6KnrrhZFFHyZ>+che zkDTf=$bIjrJH44lFYp@!zA*P6*hWtOL@%skl(sU;*%<@2MmV}J(E(qO`w#LMrG5h( zpU5Mp_{?(O=?-tBKM$D!z8Lo(;(jum;`rSECI;|jIlS47oH}8Y>W^{wT-EM8h(<4< zGt24URdV_ae}k9mk8^xVjhw18%IUx3%YDO_e&g>vBt3|3wb`h~>V& zzL^2Op$>1DKh^yv1ALu-K78B1`Eh)H+X23~_8*opOM7QHK3PXjYZ>MIVjSMAdovgG z4FO+J`w!|ErM-9Gkqh#Sa{R*_pD6ne%0DIjL>~RJ7l7M_sA6#w>I(g}K*lnCrzFEY z#sa~{+=s_3RL2B}<9TPqv+RVp*MZ@G*F+7Ua3M7uueyF}b~;q)60f-z=w4iZFH|!w zyF%I1IUE5s`3cPQ88=ZrVOB@Y#C^uRWB*_@R`yR77kgyO#yjoT ze`6uW*QOSd+KI@1U|eF+M&JmPXa=}Cv-1ey?V}N3*YtB*0iT(}5A3h-fmVhi9vX}O z@kZoywzgre-?cdr1lI=@|3P!%8|NlaF~O4pg1!0B;BIsKvqW~@B5fTUXwXFf;<*9n z?Yl1)2=LOnEbV!Ydx%00Jmx-b)5Pn4e2L|eT6J;G#9HE73mllFKkkpORc$3s1nA+xUI z+yun0fJ9}7h4Ab)39eH)M@c?Pvcho;rhq9D=`lw~&h}Ir!q)h8NoLFbQV#%+?3V~1 zk$!xw0uzOq+-wFXe%i>PeY*}Gdt%mj^?r*zyuEF{j~eEJNN<~`MYaKkl0+!F(k)e>JYDx?g8lm@`2|9y)Ly4vMrh( z`X2g#B1xa0zge17a7v#=H%Cfd!I0-f&klZs6!IbPn!F%Qax0e#()LIbu9 zzwkt**M_EdQz6Y9d+dAv%B4coLh-m^xjN*|y-T+pGqfdp19fwB^g?+o&Yaqt7*qMdTa9fV{IcbV-l_5T3eMO&Klp=0L_Dln z=10&q3V&#B&vuodk1(ePfc04X-2H$|7pW0G_e+58SFq*V=;s&v0{%TlIw0eV#&^gh zN1&cZ%L7XLfgl)%w+;LS#3OgLzfC~6LQO+_Kh~mQ>c#6#e2MgQ^FCxm$ob^`K!2cj z=UA2Uj>J29{CYc=ap2k?}904wj`t6bSdZDwbsYqSXP;eIS)uWv=|Kdt> zlZkmg$P&1ho|Tky{C~lG=BCIB1#6X2HIW<;AP-c)E3rPNGhnBM%4MkEd>-~uear{;UJiU-IlcvVHDu#KP7R`vq+OQEZ<90Xgi(+sX22EEJJe1X1| zt@FLRU26t;?L?Fs?4}BLeB3q|on6DoPhKMe;#?zKH zo;IZ%kUj-F3MxTneuenCMQq`?l*J6)C~PdLM$9rUptV*xp@V-HtKe)_@*YbLAKuIu zg`wZry?=3B^tZ^*(q<%5BrrAE`J0tHI#XdRxgBH)=O10zRG{Ap;7E9n_+I>gQD^2M z)Nyq3UT^*Pg65jtHFdAOlRXFo#9fR@V3co*Jf_pvUfS_X$S%X|eamVxtn*k$xvX&Z zEvGYMGiN5T1Iv1TrYblc!!kxH^dxx4OaYeC3hrG&AeLGYF1{uQ7H|F`HFq7<2{bpgPF@>6yT< z3Rqa|5)mWo67Wwg`$5Djy{j!)Xew+jF%~52&Gbc`=5jc0#;AuG*{*FBd#P3+!MOCd z#qMwX7A?RWno*T~<`h{YC%i2;@_KmXI8gX2XB-4bo?XfUu%yJUb3FW!yIH(p^ud9+c>8MoD*FH^)IX4 zo5yFzPP}QZw8X9Snf&qbkx-_tZcEMNaugLU@0rA@0YzqC>VnSPP*AeISzXbLK{nB0 zoy+gjWqEPqc{w4;RnK#|+iyE--Az+@nzS;N9Zpk~t)wgdqdBGOb?2^!-}~*+d=9=Q zL-^%*3Dz#ND;3#otKD)X_}`_SQag>Xp9|{~6A(9C97~*x?cH68#6`#rL{jnCj`)e0 z+_M=;+U@WWlQoB_fE6ygI{zfw;ClRG{6;zxs|#ss;&$JkomSFKYEk#JS$IlGG}(h< zlUfo8zQS#~Hy=chFc5P8g@qz$BdXP_PRd7 zCPCQEWq>?^b8!G%_1~T{@myHwJH~HQaW&&(zd>aiS1c-_!<&9fSWSx?5wErI%v6w7d*PKiZVwHn z8hAZGH=Q;NMYmFg0U6D5jjJwhvaVd8CNChr6-7{t%H;LcPInMc>;B2cxU-C@>t>=N z;l|O;S&k|uPccxv$<8+e^G9WPrYYB~WXbnvovlQR@RHNab?S$iq)bsEZ(-jZDU}Y^ zaUfIi2gy$Som%b$uifWGcF^_v^h%Z-&WE?bd%LmH)_cP}*K#JDkvlE6*JUCv%iJ!j z`2vN%rC{#?dHcFIl@piFAX#E}5ro?jJ8)p$%$1a@<>u;_)bJf!_Y)z?1G{g+<+ zgs2Cf4gN}h30l4sL|sM6?R~}r`SsmS2PrDK06uR<@YV4!nm1uLJ2Jgyl#U7%($_xSb=<^H22k^U#C@BaENlE{blT#`%?@pyGRNYeY+ErsB*0n5#B`%?x zir@Z5BJwjcUQ%mq$^EY7S%kANFh@`%dp)$UU7S%6^FJ<2s9|dF;*oO?AyL?oI>d36 zEFmG66r{;}OT&b@2>@A?tBAtrK}j^Hqs;BFT~Srn?{eOMUlw_ezw&c0dQNVhL;hoO zpz84`4X)$L&>?B_PjkesQXB7d?lQLI@S5g0tg5$bJNSyG7c6g>YkhkGk~ z!zLPdO~ydg=d-rtS+13?LJ&RzYXuGtfThPl_y`5wcWM0jNi!)30ckoplseApRmuF^Lu4K zy#9u4gDb_^!1?rCPIY_Q{-7l%5iR6TpqdY|A^Hg8?=oBU9X&2BxcE+g6FD#79PGDtf= 
zM|V|(f-~o#Oc&Gz=}_!G30-4IK#5*PCZ} z8L{YGIlKGweevcq*Sf8ja<-OY1bYOR^|l}TDegmuQ!Tmj?z7^>iv#}*>#+H{rK6EE z5y6$S|4>i>6*)&xW>rv#l-Ni#*#{j!Qv{ifH8vka#1Dy-K(ui%SU^$sk%SZWY7Zr6 z&Jj@!%2F(c%rqoxF*7S0o-rNXsQeE~%R4gzIjdsNZ0JUZx&ROh9z5g-sDPG|7SlF- zCsCp(0`u$W48TYFpk&JGrKM=OicP(Wb?hv|4|eM10QIUF2--EmzlD^9(jP;YX1o%Y z%;QBz?Y`>0=*SHDjE#@dVp(&!J0MhC1ij!g8?+VtyO<(<5>6hfFFxuEhwa7opaLe= zv>)Mm)FcL9ihO^|sVj2=a%uEi()rYYmvSc#bp27;o$p2YU2Ls&WAl=^-kcREabVvTLs1KaV2k2t}b$ z0dlicxRIf8P%x!%I>|mYiNF>hePO6vt?5sv$J>+geeJb>)kfCuuC^2Ksyfo|5L{cx z3RL`gDjO$8A+27LjQT`rWy5O*9(7v9u+2-i$kpg~P9bEbv1=w4Of#))t4CgG+Uj1l zN3{($bz62m3vV^-mYvVK^yXr08+LcnaU-z-;hhNFXmEERQu2w4IpBrkMeNpSQR8NA z9942@>z5jKN-7VoC9UCVQT##;1ID-8AWYzdV##eprL)L zh70%aa*M2j?BOo1*%=W(Q4N;Zy4TbCUPE1*^!$&VD1uZRUaj}Is^f`L| z*$SY>$k|HKEAmgGOyCsmE_r+)eXYFWyX{e%T|sG1Td}&asLwlh+@Houyk}*jbNN9$ zkCQi7Uu3E!r|pD+>F2M-T)&fsupEsSW!!%av>b(UBbd#gTU;QV9NBvt8M4Q6SiD$e zrJW?#cUL;~3RTJA?#<~CO$`v#znn8uC`}H~kM(Mgr^S3+<#G-#Qj}6*Oi4(GB_(PN zYLe0%y~;6Q78u!FC!e&vG&mbw0#X{q+6Gd8&aRW=YT#~F95+&~ZiSk7Rur3}z9xB* zIK7V3llP`gk&Cy*(3+Cl6D{@h2XHU=k}a0_X!q7ln%GcIF3VY$miG^TCModNy{@Pc zt!F#$U8*k9CfR0mM{*G}x0h+-q01zZhHai2VY`?H8)&_y8gGpKN@Kx4)x5bHM2$h$ zxqFSF-JT(jJH0&qlhsyet8n_ap!A9cU$>DJ{Mi@ZC-Y-mi{mR3v6W3NHHA&Z{IS>c zY(N-xQcgw%_sF-u=NJ`^GDCx>$4g39RjoD)i_X_}OIziQ@!%nQ3VsWZ?gt%!|KZ5U z)!J2o5OLfJU70=38iVk!E7!A4Swb|?&QQ3CCXc=tdUBCDSj zhXBWARtGv9LQ(t;nAOn9QqVp^Vx}_Z!6OJjGI0>nBRndjr_jNR1~1f)Y}xGevltvL ztfZYNUP)3hf|s9lOdw=fREBoRA+xxn2|LVrv~Lzaz9l1VN4C@8_-Dg}#mYMpsYZfJ zOZqQ9IWw~Y>!QNS-V#o{Vj-mPNJ>^o*OyjO`BkO#SLrXhf0s%))Ppk+n$7z6okT_Y zQQUckQ)}0GjPXOoi4Y#mU?3H}DN`UZU7if5h%Q<1rU>92@$*~0TGrNf3v2v+{I%lN z&mHB`K}oD+);u|-BkP!WJHRpZ3ske2M@>^-sm`mrU8D$3r6L$nKlQR#A|}@oy{dR(l8s9b6$_&Zv7$2yI^} z0-J{^%>6d3HNAjme%a-}+leekMy+2YUB?zC7QLF9+c@7WDr5f`fe3*R9~?@q*H zW<)ps*YqSPG#apFf{}2S!gTo2H|4L8ob)qGH8RC?VHz(QsD-y;x`1s}DNu-;H=R3+ znRm>J>KOmws|A!(-`sktSFcJc9La|$-+wBZ)vxA4HEY=m#zT)N4Wd~i7Fm_wE59y^ zx0_0kWrR&~vEl1b&ks-{l26qa8I#v_OJ^SBJ;jai3_*5odx$K;<%P_Or%Tk6}LuZ`pB zZd+O)U-ekoV88#sbkpO@vz!Rlg;kF&_jT9Z{I_2Pt~^nfgL(?KaNf*)-I-_A{{lAW zr^i*X6G(Iyps6gI4?PhopZx*UygE!_Og_N@$rj>b>Ye0cHZPmlw-P$ds@*h!MYTw?unfG;QW zstz>S^iW2HvQ*{jnzD}(E}T!jQ()Xe1S=<+{ZO%~Y^gNpEo(rq!b@MjApe#7OB^;t z`*T0?`}iL8(N%g|Rm&5dW60$y`IeV0JZATE#AWDd6q^Wtm-pvebD1Q%5q++)VkrC- zHwYyMv%zsyQ(6+^UnP%mk|OsPGT|ZjF%oXsr{pWgzl4k z3u|^j(^kuATsaAbSwldP&c#1Yj;2nI<7j<6k90w8-G68*xjjfVwpIKHvd1&T=f^ah zQkhli-dxXzl~3{g>-;{32O7D@q12ozO9Fw9BO7?iJYUIYB zd-s9-QJrP0#Zm1yk$t5X6UFw)5@aP`d-6JvQpRhkAfXg1V78IzDYiEXQo)Z}#xErA zB`;^6R=&?%YF=|nY8LGIu*Xz|84E@640)1%(5AzcO+J}^_~ahQh0fF*RrJfkJqp6@ zPut)0>&D3|nzZCY<}(ug$kj_V-W)CGXk7}Lf6Z&6-ZkFKSj*CMh7D0bj^4qO9C_fM zSZ~nuAxWIyW`NV&B_?WTP-hYK-BWP-u3WP_B~;@ga=dI}A4Xw8^)p4d>U)*HrvioP(hj0~#VIIHjiyOM zx+0<~tER7vW&o^)|GMFwEHQb-uW%A`O}owBb?taxPAk;WO0GSL+fEKttz%_A{w8qw zDd^sBR6Bv|FqIbd&)4P-bM{A$<@*@ak$)p; z(Sc?-rT`m&9ZK{FL@O2Gyx6d6J?6}|`1?rZ5_r)#NRc7qi+URi9=s_NQJ1%{vP6H` zpRHc6KAg($vz;34HZvpwtJ=7415!RdMm@~5j?|TB;%AQ6*YAZmL!Y;AF&51nEhCY` z#Ylo70{cy$*<(JRviw@h(MD%FmRJ5>YA!E^e>7aa&rV0xUH^%76)Afv`pc|^2b)&( z@G?p)G1l=&knGiP)AkGVOZW?0PoZ3$o2R$QBDq7%NAu9fs;5JDvl36$xB|3*00B=F zGI(~+gp&e+o7S?ZAw@)T2FaO%RWpaY3@Zt19^zAg-FF9T*A)1db$7c&Te#=AkdQrO zeh6Bq0@5$?e|RVZ#CKFHrdFb6AqZD#{WDq>%=^V9U%)iY=7btSw-w2Fx5kdXA16{Q zs+UccQtYlR+sI`QF0wAY7=@tniZ2+a5g*Yn-A?_%U^ z4)_CQ{1FVjZLw#UElB|bkf`jNyv<@MS_?#1^X!vr8r3eA!8hb75yinh4^&$szo<_ z`+z5pCir^3`#1ei!;7wTZ+zW(z948GyzZch28>6RkW)(*o*qGx!XehZNupYW+;sXM zr!yDm;0S=yAWK_rz5=P82xuphl$>cM0;_AKcsct6{We{|(5o zeq^A9a_tF~xHz*0q7){G%g?#tQ)C1H&0_&}8^-_IZ&N{!LsoVaTyn;N#vOGXc+$NP zl5g&HkhYDslmD@sA*L&zS1F8U&$i1p)XF9t zJWw}ht5)6Y3Tu-Y( 
zw;8!sMa|nRZ#Wb%ivLm$>kTkOup*gpED5&F$>N=?C6uZobmoRK6|1fBlWfqv|FzAT z&iLcpU|%(skv2wM_i)YohHKsbj`8{5Z+ag0!lhn|q0h#J%;2eR=$}L8=}YdX1v${` zv-^0p;}lf69iLlIu2;kPJq0(5-EQJ85t_9yqw`?7VdABe-6nKn#ni((DN2LccIo}J z>ZzJ|bvVQ0S73*nQ4ww(@!0M&hepPzOA}~y^-?nKSZFl!5=LA5#3WcP_BaU*Fm#28 zJ}_Jkgv>Zwc4hx=5Jo8D0oh-8yK4Ef(+NwTHCoXZ+Jh~0#`}ESyTjWg#x{Q7K3h)< z`7iS{LLOxWI6P~X>{-)DNTv6hI(s(xGbsAeVU0-q;!7O4-HnawfF;VbKle4{pM{?`Vin;$TE899tj-IjX_s+{uVLh?Lpo|qg$!-Q4hz$g{ZE@aQB`+$W zs4OM4<3}Wo5(jf8mRHF+tdy+1?{Q)IEb}B%Eu=50yQ!7nZ;Tq`;~6;?vpx}Krs)Hx z{72y%$mrueJRG!+9_SPe%kIxHlNbNu4(6;jCAzb79G#>o9Ow9Q67Dj~a(geZkMgzi zy}in&0RT?+lOJ1AC2CyW+Q%{5O?Bw2Vp4i-*HP$j;<-I`Pf6N#U9`Q1hdUz?+AVSI zpQLYBPe~R@4507k0YD!Ja+MGNAkfpvIgZkYw_^2rqzr>S%7mv7?xqOEBNS}WSl`&Z zqa%8Xgvy-gi6=MJld^Vlfyeh}SdtS7O-k4%X(@q?n(e(KqZ&1k9?C6ClG|!Y+P%rT zNugybwxPGi^=7blL3W+xg;vI=ik_m(JCnb=IA}|5&{lsJPcGcT z2Db!n57uo4A|9=}<(84!kf!=96!zei#=xompy)PZNMHF^sRzvNqFh;X_!|i(KTMY@lT;bfcuUr0%d_7^ zX;hF(^9_(T39K6Z0>qI9GL8DmRiJhM(vIeSb3#uu<(7Mi8G=X>`OCI`IpSJyovXWw zYBNKyXp*rYox?VogZ9E;OQLKBqcJp|g=|3Ee7ysMj zF}q9{4LJ8Sp;^CMPR=6O#c?|3dF*0Qz%U5ak$9 zBSB?&om$~M;V}b>`)>kMA4v8=4_|LEH6jEva`K5RI_8dcyzX%VM=wz63*i^GkS9Jv zSh_a599HJ-DA)2})VmVWy{k@ND%_{k_h&^8*8|#VZot7_Ep_xk6Or2mj!}dd%s5@9 zSDv+h%|N}$hThr z$;QkD!E*}qWPp4>^9`QI88nnpOfZ@3j>8rhz4D?Rg`wDPS__RAj8fRI8Q zz}^07Q^4b6v3K=)gZ<)G4eVkZ&SH~s81KSCW3tZV6l1pA6b9RQuP00`(7P`!f;J^F zz3uF9FO6clDni>8BQ+n7qq;2D*T?lqQttUxh?d9xfK;g8Kh57?+U_5B7~_ona7{)zq``d;y#^M0~O%esB>=)h{AZJ(6zLrt7~8|PN?k3XuLG7D?;KdF`(MlrH`V=MVMXibnR)-67pO(Ns%t4 zi&@X-vMs45>iXJrPSaUO8IbHKlOEba>2X^06aba+#eA`ixlmpxNIG>rdn1B7n7@_7 zM~pY;eFE5~G2+kFwNO9r66g|m#`o>1RdJBeFfHJS)*|z$&%hP=S>TsFlTX=fM8VB- z7j-A~Jn{sA8(OE4FyCE3lVE-E3_UQp<++85okpV)mVIauPCi*oSHgtts()WeU!Fh*z_E39+IsoC_825DC0OH=F>Daov%T4lD-?0I z5NX?74!y&uwAC0i>{_~awhh_Strl`gpIA9%DxDNjfxZoP_Pt2CT%`A*AlA&I)j@h2 zQJvD8mV=5Yu_purZ?ZN?euC~E$4>?lF?ds9^rF6%N;LWkB^QnFpIaulCeT8GTOSEq za^;*=eKCzvVy0pJOKzCIoNz5j-Q=;f-`)$XFwO z#LUvMWs}FRo&cS+nE8^NPq>nC9r|^x7Z;li?i9h;Wwr>V@ zb>oMPOPZRa2s2Leno*~rw1QC$_KhH&u@*F@V-g2pHI|}k1M$w2q>^04Xi+b&yWpDd z&5sJx%~!6|BV(rly!O#EU7ye%jxyepvdRg-0 zV3M69Juop!igpJ!ASClQ#tQugPPI-vU<(-qFThz+qN5b3-2X85U0WGk`^)7YHtuKj z47Ax|pT_-4Ebtdpib@~b7n>L?ESaf(Vd&2HAxL*kTxouv_e>?KwR5P6K+a4*>@VwA zAy(?7kq8_S5QL0-y|} zs@|^1K+qBCoIgP~Kp9(rWeXX8&E$D75L#)KB`X8#oGop0UVT%{FCZ_QHIDOR?-51b zs;~&E8>Jm5&6gmv>QE4fTsl>jn7#GEdR&nP`Ic_eOl)vm_5%(I7%R@4vKt>Rjp}ra z7!UBdjU6}*jaAtAHY7>2&!~#XN!64@Jg=S*u8nuLolkzElo+Qt>$LkeZw~r+t$hkp zva_uSodWRRgOE-kwl({gr!-u zpN*ysEscw6)8D@r7xMOxzu%zQc2W>E5C1gJXt#e$fJ*v3u9!+FO=@@AAOnjiHUe6&gop>vR2lB?&X zme_LQe2cgsVsy;1f^H&uSWJ8nvfkbKVg5a7`Z;|-2Km6|wz8y=-_OBcJcL3^%fj71 zQ$b{4B!Yk(G&UWoAyQygmiNJm^O}ix*#}t62o|6_jEpLMi>RaaTKdrL z`uod|Fr}m_?_|(bKc}r?!=|&?);Lu5Gl^eyhyp`zBg@41Qn#~6bJpjj zD#ZPFUoL1Pd$>Rp#Xp>}vQWp>4Eu3*E%;hSLnkF*`#N~sQ zi&kYSP8Mr+bJ5#WuMTh^+}(mcvc>C{4Z|vQRW4K6qtkr{WPT!&b%t|V%H@MLwLRTV zkULqiu^R|+Byj>Eu9!c!;La)_j;5;eDE$eMLH%PESbpGT4s6Jk1Z8m20GZ<&cblgF zj%cihhie@VE|4pJ^zVO9jEh%Nitluo9V`qsi}{hohnneup-qN_WjX0Hc0|{P&F%;K zo}$2d?Ĝ*Df+N(A_i*{9JUUgA>w1Wh|Y7Fmn$G3i=5bn}1JCYcn;R&uG5k<5QzqC&$B&*!OA2n(c9(e3l$=BYH>&AR$_(ET#BWlrvZ zxhboUn7wB!pKb)jf3H5$O~{kkb37b}Y^TH5i5~mB_WAgZCv>^I-?DErlT4pi= z0uBxWxJv48ES8%72OI_WY1ShXQ?ABlTKMyGbXf6Z^Rby48L6-T4b;V!Xsl(poeT@c z)gUA(uk#8D3JN+pia^ISJY+fw5cExV8C>Jz!`8dIiWr;9iT;F)%dv6V>8Y>(d^idn zTIAA{|4b;IO7qeolQvDIb$NqBpJ+h>oBIHMT-DTcpC&j$Mg4$^x<)X~n+G3HH?5Vz zMo)L$d^&QSK)TCoqv`S=H~SeEw7e{7Fxl_K5CFdq!MJL7fK0aJ-lr*&lher1(zpov zIayN_@e4Rw^v?MEZx`TotEGwh{TlhqBQnTwyclj?FHhV&A<3@C6@L@g@zGDGde`fQ zd;TRobbi8R29MgmG~l4Ir=HEu3z%Xtd0>nEazV5*^hLJHk@Z5=bjS{UmFU~v&ki_d 
z;j^RT;rBh>)$@k6?maieIlg1*z6u$*)mT0L^DBbEm*A@Y*8Ti;1?pMgVD*LA?@=X% zZs;AZ;IKsw33Zsa*9d9M7=9^w;QYj_qW5F-_cDh~YVpSmCS?D2K7AV^Dje z8MNt$6gk-g5>~m#5Rn-Wr5bNq=pYj6Wst}VU%3*$Q}u`rpnC({U@p)T#@&L&L(K2# zs@%&Xqftlozspa&#fR$DsbL=;1X*-%tBIZ8y6x-)vxCyMQV6F5W55Gb zJ6n-o-KE{qpOrOf>!6+veBtlMEl@MS4#-#W4#xx&muuav^w_=q#;)cV97XytJg|5Q zJyZQcdt#XkKFXfM(L9hA?U^)*<0r3EN&Z{6_ZL~W_YeR47e85S$tW+6p(jIMThw;C zFx`Ocy6OjG5xb=u=f|%sZb?y6q8*?0BRp@~JXeUekd_P@T@sijk9e>31bK7y$V^(& zc8!Bkpzkr#kQ(vk+nKn>?CJWwevXJSWK732lG(I)hp&^hcuVubY7vC|jW*{HzK^ws z{qsJ8kcasBMYyf{_MS!}(#8GFuLpD$^wryi&HZMtm&!Ac9N~%Y8B-lNgQu$SC*FV`+FPg%L&-QZ{gjgTzHlKH~_x3C9t@l3GktglDz&3)l%ejx<02)HgV$=o1 zv;NA(`UzG9oZDLSgj=p%RumEtLuopPk*Xj1ox z)>VA-MO?EsrQIo=-6@eBMN;!aT=PRr^TD{5;9U~5yHvQfT19wK3UWZ660(RQNRSf0 zK{pb5UR0q@o}LVdL=JgP$B;q+wdBb(I2AT@fGL*Vq)Idve_47oi{PWO?|t*WKSCZB zQ?3RyEdJz@WeKm{-}Y8!Wt>kDll(oGci51-{SM$SKX@>xSbymQI0kd7w1&_ zr|jIDf^y9>N^X+(k&FA5>3HI$R(>XJGx_e~C85{+UbxOei0IYb8r9wp^GeaA68 ziAEM2*-Rx>RhS{cjY5svLhF~g;MT<*Kqw73%H(!6uD`>Yq3T>qTyI@v`9L(e%zw^RoYAF;%C3Ri7Yo557@l2@b38kP;ZtiKKy$Hc?igxHl@Hzg$Kup#D{3(GaB)H07i za(|`-n{iA`@&@5dFydC>MpTES#hBp(Zay4${D1Ph+5dO^ZhR(2W_(^=NGE4U69XGa zK*s+i-+w{U&5MAr#xNraj^!t+Kjg;w{HhIEP-IAQK>hHxiBA6y`#v?5%gG=EXI-g0 zF@Lo^Qy4r7R#FP;-nstw322oh(om^#A4#|!!&9)a?gmYLK<#3`@LAg2MZcMu8Cu)g z_~~yrV$XWSiaE8mH*VLg_H>4AdVg1DG8rKf=&pGwJKc8#b5xUa)wd4E@inV2%BcvT zF(M{sBGNzgr=TmkdVUM7d@z;SpB512uXNLK7mT6RTSIq!$;TYi>HN^8;>rXL1vH9b z4MJ=9nK4hXF9eFO=%pjEvf}UJi>!(oqH8t$)JN>=x8^+V6+SrI-7yA2Lo*ythbv+E zm2tZ@Fl&OxDt=51>bMThkh}&GkhpQc85mziE6?5%d#07!avjJH3DU#dML2gm>Gn!#juFKIQbVPCT(06B;lD#y=K^`z3&+i}ehVX8brNL;Y!XdJp7uL} z>T`2f&8Y>Em(s!WhF((aTarn-s-Qr_REwG#Kc>yYd=>j*6gqT3(K*)gO9!OD(nUoj5oN?z}PNh!#~XwiuWemi)8S zl&f6wRCcyk_8R2*ZRk$0KA*!zjVh%d_^169JOefh=3PF|2rhHAgy}}Eco=4QCs!!A zk06!AspZ?S6CLLYZl64Nism63%V}Y&MJM-b0a&3$pYV#zpZ4w}<@7fpf{bt91o!k3 zx+$~aDK;EU6vj9=6ja&_Ug%E|oB$GY{`Rc05qH|W1+&oaAEan5(VYi+kBm4IE}KW9 z$)wdQN~Uk}<)4l}J8d4x>Ws}MWV)6z%swQ{Id-2a%$EyeTZl(s)d6~W0#3Zg%IQx` zJpbI1=#Fj^E4iJEv&Lj61kM>~>|78aE@1YCT6!Y5ox{_{I6o5!n*-c7@8c{Cw-zFM zBB7VhY^d z9Xok3ya5C5cw+Q0%)b@{4V{-cvT!n>{{}$|o%_?p4A(mpXyS52l-K(&Hnb7!fc;vq z_P?BG1_&YxDzJVbEqaikiQI|Fk$!RyxYw3|twH*Kxo+|~4(vm7>*zpuok6kWc`_}T ztq8XN@iB3VxpED3lD5x_tm?GufWb2f{LW zIl3L&iGpK=NR0<zIi`|+Zp|o)lbzoloQPA2i5|qG&Iz)YNv}}cVQFsZXaiK-!on80eQWAMF$VDHS)vv^xtacyaW9Db z8)BZ&S3nv@52L0(=mdBT{I~@J>aK=n;Gp`b*WXrnsT*vXo&Z4XZvX_{tQU91EkZ?} zI{>-df;PzzW;1#?#p#A!w{R8k+bUIVkEIPxj=^l#f_X-oy5FO0*s){EE7+Y*bw?)6 z&#cz3$cTh>j+wjo*1bdIc?YyZM(LJtE0+TB=UNd*b(bgYUvh0~E2dTns#Q%Q=`Ko# z9MUailLE|=A=f>Nm-wQ5O~92XFmE1ZMZav?-^Z0fFPwG@?yVj&AIgds=2g zxXrN0mGO;0x)N+VhVdKLt^&o5fa(&EYeT+!9#aO^{a>%OpxZO-)`J3+eeamfyYR0! zKwV^XP5Tg$gCP?PD}HzdYLwwVB_mhkb8o@H@ywA@nMJ9Cb|0__7)@Aa+ORnnhCCl_ z^KJcaw8_!&Nb$L9Uua{UVy)7lz3kI|fSepW0 z*rlu+I2ikoaZQ;|Tz*r~a*R_^Nazm65Sy75m(^Vu1zlqXsl}=%x^zYpy0fsIddPu$ zCT;ATLU?dic|b-aL_I7ZkCI4@*%ni?g!g%|Eu(RmL>FYtodqy3`PwP&cdZ|0xIlr|Oyno#a)YALp2GNFpYP^1GxVbPG^k&3;VblWv(&Az|n z{oO)=m{8e^OLVPMvS5202Elc#jU(jJk*u4DJcMmu^OOGc35#5tvvAxVP!~ve&dEL(#}htPuF0xb%Z!z) zc~-4nrF)flc~EOt4*9FsdTik%8c>A5^dvaQC~}RpNw^gjdPWP5XqBWb#mX zKTaE{nh8B;Eh*!AYa~6dA4Z4x{B5dZ_qN1=e3I$4h`9B)t#6p&z2@hKSCY7GxsC$> z@Wx%&zLLL1vYOYxHL7m^6oixdeN}p9e(~*ur zK9I0&Reu`!fabW{I9pk0UAnTf^~*@tJ6XDNJ)ywx$wY2=Zn1<~tokVUD=|d_QNrsu z0zwg!{}+!JyxWu~&H0aI{R1NY7f>8i=uZdwZ;LzkgaXb*r7^*^L-%OToI@$ ziB+yxEnr-juF;!g7%v8DmC-z4Ay^{m?eIek=#aw5tpVFUANuI+WL({ zT_(+`KWPu<%zZfq>8M_ac1i3yRNko^!j=GVvfw4HSIswN6O^CWZ@nOf4o?f%ozLCA z@@W@YvmMXf#Nl#|svjm<+rN19&@NlvWu)`_CSEv?vZ`#@F$|^0L>L{!MhzyXo!#9H z-9@xw?Cm3=CHyVZ!W{Wem^<0jlqt>00s$omgmx(fy&);T( z0c^^~-Hn$;87-qyCGd>1D*DgjV}@S?W==2+C&D_s?08? 
z>e)NBlgmL4hy6URrP*wOblQzSthBn@6aiduh&&r>n$xO=Ew$^`ov4ZNb!kAF6 zP%@*|e81G>ZIvm3O;5A%Xy@C9-I5Ucwn;s}ID(aUr4Qz(d$tLU^?dQF&7q0wdN_vR zAaiY=bRb5&wWv@Wjp*`1(ekQ|1*ZZ_uWbaQlZ(GN6O?R>O}y1*6Y8H#8w(f0%0zK1 ziaARWO@H~tL7#S+znPyIcTl>zOiPWSNcY5rGCurOO4$8pn1qGy9ga~YRx@GHBEIpP zv|+0+s{x&?3*gK}k#zy%I|VGqM!MvlbR>!0b(N zn3${NQ-sac7r_+D`T-x`NZWJEU!`Q({M`;Uz@#bM978PD)(D~*Jf&?w$2Od8j_ZTM zg`l`kEW2~_$ja-#Puvui2e>kDwb5@Y9;mpC<(M*fs{;Dg! z7&w@j|6jXZbpG)|7Fpx_QhwZI;+c2COEorSOqCF4kLMSMFm9Q)A0YrD6u6Rt!1vGV zqs$2nFM+6n+ydYO{RT5eQ@!QwfkIIe7<4i*Q8QMVnD{QulIYo-e1H4Cx%YP4o>ndH zQqft|(b1V!npRkp`1|W`R1YvT##WX6WsO@q5Ij>-6y{>NF+~=zlMQ+)qWd-xvV1-AHTDzzIw74H0(>a)?K)3Hp}dofKT-u^4|uSNt2HyjP%-%p>ixe(VQ z;)NUM^g(MTerdv4b$^XOu``e;kF^=W_u#{Ek+-Cwi9{6H>Udy@Q>BaOcK#AkLtu-M z6YQXLv0jE;9}3o-{_C;-FE8s_}SVR5P>*o5-=;aG>l)(~;_ z`ke@J1KZjYdh`UHhKtL zb;SRN9%Ma$zPF8!Mi}ZI6R-kl82So^%5Qte+z`w?zzWtB=OUU6)i=3qfnv>9#p$40iz%>YQz;*_s_P1obuLxxV zfolkDC{GS{E!m!44w|#b11A>T>V3Z4f(EWJJcgD^HoCp=ZI+#MFW z^$z?=1UvE@0c^n6C|^G)7;FeoD3QMY9mu~x$pHm0U5Ee#vHqBYWqD8`gkk)wzqRzx z_xt+nhAjX1J4SkL=^0F2G@ov>1*FfrC9DTmxyM- zqLFm}#gGqpO$D3s{}WojYe?MywHkKuqTEs|lk^E)N!1DZDIG!)7J zY|kV#yN@s>tOebaNEbF4U)Nvx(^i#^BQqrJ!-kvgapyxm`_T4R0UTl{Y*e>KS^jx20562#7sY;9+5^Kj}fXhJTPUwYZ<1YZMLbPMO z$8(2tAkv;`k5oMXK(_~OkGJ6u;Hwbv`eiHP1#c_$g@Eh79)I26UjysFwBh#>>KEPT zF)*wTytjX&H9!N#3*<&%8`O?)zJohp6Dp?9Wi7k`ln#Hs+jZbsgLEa=1=SAig^0^< z{TCd;tzRhA6WR-smFOx+EbhAJdPiYk8>E&+xC6@ubbDAg z?FZ>Mq(A?tAGo(RD4`f}D4`^MX4{TTxy#PAs4@6zD56CU?E=XU0HiT~0 z8<38m7c=$@FVHSOH+VVB7c>CcZS{{3S+hOJZGzu+FJ3%dzs1Y1Te};Az2{K6UO0Q? zTQT-v02r^}$7qk}3-K!<@eaEoBCp6twDk9*$(!ESq{&;hPp$NK9cb#;A>YUYoo38w z_IH5v_XXO>!~P_r_tv!W8yU^WgB*0~Pmqs1M8l+h-EuH~8VMEoTL0`$$DZPS21xUn zjyxCzOY=S1k&2bgeTo%*|7`1{558_(pWA9h&Bn$5Iqw znqWvaY?0T3>HIx z{2~F#dFWwg;CTftH>3E3q_Y(l>%piEk`|lV2A7ME2P~@|Y-lzK3KOT=MKtOgADCxty2MZ?kIQd=>P>E9hmV}w%e-161aK?;@~{$_E>01 z-EhSRa$gwBZ~^lDqqxG++0pbkt9ZB_?#;lU2CUWGc< z)v&fb!34GT)^-Qx>w_})xQy_%7@%%W&ox5MU zDEy~9FeYQTbFmMF1_s)7i0+e-)9QhhwmqwzP|;u+`C_3O?5SSA9>d^DSMj!sadnB_ zTZQ-Qgh8>)u?tm8_TTnBKXH!_2@nZD$z~Z3zJdHOPOw;NQXI~?q{g6LD)%xnm zD_JFClly|0Zw|=*=6=xq?(9H~SA+}faD>_25)$Oj`=ii`QmDJ)x>8rf zaG2h{{uhG^9wA-<-b>~^Cr(;xu}?WdH!Y#EMUBodI#TDS25&@U4y03Ktu016>LFUrb;t9y{O z0v{0z5@)0^iNO+!bCYwlr!=i#t@juU>~dLL$D<6k7|ZGFf7oUTx5V`Fs|om`LnO3G zL}UxoM_nEy73G`)MGVn?CK`j|y`YERIND;figa2e8DpSBFdBr)SY}80!X-Gqp)+=# z_u4yyY3L9q4=CRv&4VvN%qe*9HL7^@dOD%STJnYWu7jT469FTC-sstoTX)>N*bJBh zTibX3R3Νj*y%_4uy?^A$kvj(pe$p(x&U9M?13J0e1YUC&`U=ce#Ob9frWF-GJ= zu?Z!t192G=VyDvllN|b*7ttYJd|zG?umF6B*V{}*!V!p+VNX+fbcNiV`_tCIIYw<) z=*r>ZQ{W9mZ8RI17JuFzOgl_f+Qt!s8ddb(E~d06RssGn6{jwc>YzU*k0)@h2q&rz zjYI?3`IxeHAR>ba;?dV8G%qZO+i$?6C7D8u@6Vb~B9y|49!kv$W=4kr@U>#S7= z7s4sY9{zORKZ>s~YnTzFdaDbNN7VOMhQHq8(VeZda!;1DIrsli zb#JXTmnG)HhBP$+WBlN)9=>$sykOn5{Lv;;i6v3k;gHO6xfah(lh>&zii#F_d$j7G zB0{Rs@8bYvaUq~SyT7+O6J!}O$<5Zo7^W)z*i5`#NW49w3d5xgxnr#q>Lki-Q}YxN zgfX6|nN8`gRV$!WcIz8a!+d2C)$R9U|Hl^S;aU6$xfq)!f8E9lRa|+dGY^%%ZmQ@h z>LIJT-Vkh4_NSh7V$q$@sTo0z&Cs}{*lm>+vMxBY>|V^{vFcvLqZW6;VRj3NbP)!J zwau;ipM6W2%sJh==so8G)k77wG#A3fj;9YXV2rfoXAi9*yz~Z_3%fd2m z8KG4`uFKK`kyCYxA1X8olWQGaeKX+~iE>r8fgJe{2A28gQe$Wl^55Kg>i)^4X~3MP z-?@N-y1c&r6Di0NSGq7_+wU++UxLCiCuUTGGw|;R7jm#BQ%-E`--Qdo@Ut@y?H>dv zHaoAJEVx6^POZS+x`L{>Gz2eR*p)d%8nZ~HTc+7nhvAbgV+H4y%SN$$tGhJkKR48I z?tRgfkrkhT!$w16vzj`WJeQ?CtiWR5*4Hb0A0M?jS53`2F((VmbENfRG>#H(C5?ugsR#izECu1s|_dZ(w`KQ2DZ zHkqpTYWhn+z@yOXEfe4BIgJ|^fZvOe4gly3oY-d?;6x6lqPN8at=&u!Di|&f*mYSK zzYn^V72Y1q&`b;NRnXDhnk+WKK{v}(ya{u8%Pm!9%*AOt*%9Z6%3bG+Pv^^zo~kSi zGlCDYuo%zYDysTw(?rMC(0#)H4aG;=P+;*&75jQn3sd>w_!7u2G%($)1gP##M`KO% 
zv{Y^Q_(0$`-!}h2>7(3Dv6tQ;^P%OTf=kR+`2cuTe+lsP!_GHf(9`ie&ybp%QL|~> zS5%(?TQ4*XJf(GR%q}|AGaXepCAM!lb{^6`Nk3yhL_L_iUQe|!%D>wIxbL(3*?C{L z`50}!`J!2axeKa&y}hJNwr`j2t`DGBYpSl^iP^FXx!P@gcZVw@l^z-0;j<_web`8X z`^jor$poT3B%>k}fj8mZ{`TM<^C!OSgLu`EV;67eG8dpYm#_IZ^YiI!sq?Dyspzh% z!=j`t`wE`S@LEHy=Rtr>j${jNd}Z(b&hUfp#*^Vuk3$!=3S<)qsd8XhmgH}is;gp) z$-}9cB{afpmcXJx11feRh}y?(-t0`{DQmlZ`1ASj$4eHJ*Izjvy%8nZnUVsLgj<`F zK+Mj!(s5$TB*Lj(6jEHka7Fiyr}pDjd_Pr4rF@C!v0a!R;jR5R{oXuF7Da_jUrilu zArjkwr>0Nzim;r-JU40h^84d83SuSP$q0blI_tV%vCpU|AG68wd}rc0tc?2ubcaX8 zJl(3bSdJo@BE`NkUaSO%iuuxBVzdLYIv@kw%Gfue!(fcfxkXzXCBr1g1PZaU{;p_H zV)^S%VK=O@5m>OMUg<=4va5KXwfN*|5AT|mWY1v9!KZgN2)*Y;iO?34AG)3?*PSos zqM;_a?&*3rJ_V~6+V!b>*}Bc~o6W3>?=CrPYjY;&Cl(;$VE~<>9qobg`dmeB`S-rv zT7EK_tGVY1d6o<&U!c3X6jzG(Pi6jd^@LY7yygfU>P&EH;$9rA?6K>6lj)cppI=;L zmnBooT)RIjudeeXc;%oObU4<3vE7Novvo_F3qAJj)KDkvzeqVNcj6=J@3-!fkv>WmS% zhv-T)5f2~k-dGju9 zC>O77^WW~g^|&1*Z#~TEH?^p1VuSCxoqIqrtsvShg+ca9!b8W0A^(|O6=QVG1hZV7 zwr%%@L#KE3r_y&Z2HEG4cM%y5vk=@#sz(y-7q2@aaf5=1bmGBi3!l`YiGml!jvLr- zAC8@2hr~}H+3`876JW03mC#B*fSaX;EKnWhiIM|VuBl7cvCM@Wwq889NU{Kg^Bjk5 z(`5xe3TKF}jC)(i5r51^$0b-mlephJ=s)6w6_I1&iQq>(TfXIpKc)en;l zM9F7(BzVK{^+|#hY6mpvL%2IJcUu^_7cjxSqTHj;IR&QXA?b+=?Pf`E5|73P<-EDd zrT*&z-K7u&-;S3)pKPB!5CGNKCmCf_GLwR%zYH<)>R8j3Kl7oaO=00r$bwUY!&V?G zQq(>6aL(WOxoDVsE#0fiCOzRV3*32%Zc#E;H*xwa(f0*$h^0gvh9ina=oaVm0V-~r zpS#4q>T=~=bHrEfjqvK&W+S2q&Rz9f_hH0qUD%#=+{?NTWiBCcc(Z zH3*DQ)zF+V%TA|TRA zGgwpmV^-D_V96XV&#oLLAIqh-eH=k0e;K&!sm{4*90LKre;j2Eyc<7>)MgotiEeap z){3ksHG1BOz@Z#o>2SJe$$($|PWO~F`R1scvW}a`G<#~*cK|1aDkA1XL(Q|E2n+^H z3Ud-6wKmG9LpT`-94WT2I>BsEa1Zg6Tppb@9ybcvbKkE^rw>MH^2Lv%-CMu<$=6h~ zj=ASrCD^6WGu=bJ!#r`d!X$A4r;h?V*I(L10B7thXSQXV3A%aeQW(_HuvIHJ|K4T7 z)-a_dicotg9J4y{A417JzTI^dNzi{9=cenz^&lWX9}$$AmHLcGj-)Y zKy(*-fsKvhDXTzm9cySEu9Q4~MN4_{ljdx)+)fmeAzpvDQk}j&;7Od*zK3UZJ;urn zO@3Jqe&Q*u?WQyN;wN|9Qed=Ru8iBTR(8_ePSVJcbZIxDi5>GEAT-t4`PVumlN*oL z5d#e~jg?C#{V@*fl;UqjlOk#XB48;{SggL4OoN)x87k8K8)mVOB8x-!;wj3nV*=IQ zcM8DEzMtpO?v=B1T88v&BBsQZGuJ}TG$qP(CCbdZvF&gRX6hHQavT+@3s`B*z=l}o zVTN3xA@um^TEi}-3=wuH>NNB3&#b&H%;8B#VJk5k6RKj^S%eJsI&)p32TPp=i7QBJ zZ6-rkh0zejzT7|QoF{GU>p?Nb-(P>}dcuxUa9sxd892s<2^Ff7Fua^rJ*y2d>vl_!N59+TpDT$rrTmMd!0`X;;JToNGg|g%}0rjA2PZ>x+X>I z^h}wk&R(|gTrA&&QXzpm zb=6A$pmr!wcnHpsA{W)B-?1a-p+=fT!MoeaX2nN8Kn;(V0`roRidRqV0A57JSY-#m zOI#u=(8ENW78)+AtO8FiS7IC|pvqzSQ)=XwG+1e_X@htc_<0h2-}fsuF?CIfIFPbJ zc5eno1A#47@HDJrb;iuxVIfPOGo#jygUH66BZA#1h1Tc^ylCq;F4Ku-!uVlHdq+t- zqrBTC81Jj?F{O|*29Owsty0~sjOM$fJ8C_XWUMLPm7~#;4rLu0%c|6&wHXDTTT&g` zHpXB@dBcu%iJ?;X!c=*%CNVM*>>s3$SnKnwqHDxb2jEW`L`=DCVOlGZZ40lx;>U;j zYDDgWlHp^bM-cMx^gkj~X#B7YFmiZNTEt_8ps4`W%+qX@Vq*1hgg#oYF{}~jZQEX` zW#CX%YdNyyp=vP8#7t(PBeXQN6g_&U@Ye3u?nJ9WvBssO*$s7{+@_H_#dsOz7^z0t zBm!srWvTG-L`}Gewk+G7C5iKc$&LOv^J3MA!cP9!8f8{1O;3TL3hS;O51_F8Qo`|Y zOGb;d!MuQq8O(P@uEy)!bO9oqCMbf33(f(R`n=%1qjH&nhL9-1^szve1|S%0P@dix zmnzvAWt>N2@e+h2$c!0&w(FYO*9)U6IhR&6z0Dc}`fEF04&6vp7{|XpQ@dhZArAdx zR50UUNDX4sY4qK|PIu6lQz5fMT-NIJz~>X{$!0a7NgqDxA|{0+I2R9W!|c5ltuo!GTz`&{&QdleD+UPy=D_UW+42Z(clcCDGRF^tY4fM!fw2ulow5i5o5 z8wV9D#~Norh2_cPFviAMl#~uJFE=a@rx+`H1)DSPXM*jQ=qUa!?1>3!EKUO@O!Ej_ z_3&C>_=UiVb#@5IRzwDVcYCBr+z%?XHYg2L3S5P4_e!CWHqo#kJ{(FVEQ}iro9&&F zyL8lNWlEeJsE9D>-d^H1o2#Fe7gibR3;bhb*naLFb>;q53<5jN_+^QmR%n)#7sQ9~ zcm>|4rSAvb!l}F8j97f-Yzkmx1n3@Tl15Vm_zu*M-(>{d13EC66EK=#9H4IpKJ$ZS z$AaF(*8#)m(|~L8?;s2|0IU3F@}R>Lb^Km<5HaCvvT2;O)xXF@I{T_trO68%AuOhI z_)BbNS8UEeDEIbp2^boUJRt>wm2iqQF8Ww)G0sluH*VGJmBwp*!|5Y5@guJ5_x;zq^y@-e*a#*_w2#~3Q~+O@QbvNvg@vyuLLw^LLZiUpwNc6 zWSb_P=ltIIkJY^^;@MK`?IHb#$EERNE*Vzw?vOriJsTVT-DFzbEvaUkZrU+AK`$mZ 
zIq+zhr_I{01iPDV9_2}R!r3%7IWDc0@sa6DYl~H_c*;HIz%G+kp*^Wz#Jf%UP|mKr zu#2hc@mygh2lI5IaiC0UU6`?AAUChcpc-Byqa$SyPQ%_k1s@D^U3?iz%Uz93(KOZG zPiz?SWNOY+EOoaNF(s7^pFcW}o$cQfmKtsT=MM6!$PAH*jplT(TFKb-S;MTIL zch`8?F4CAfjhMQuS*xZmR;)0YuUxB&w970{_^T;w;C9oE@B7&|F_`LBFIvXd8z!|6 zoXJmZWE65}%Qms`xFQtkr9q1nbY8b4b3jX{6@sM#+w)VGDT4)~yxi523yK7R19ND+d$}a7RSDRnE8VTkT66M!RGc}|pQA~|)4Hk_z zj#sLkB_I88QFc^QtCm$Y>=u%bwUKnnE9GvoWRwDFVJ3;L1piwym!Q%vGp}(+2Gle~Q4{FL+Kx)?EQ;*&i6 zX$`m;uxAIS``?q2bJ!KJhFn8vq;V#bZPM(Xa@vuM>+%W0kqQGD*b`&XLmX(>mPpr) zKl=_Io1G(hAz=;NEZ5@JpU|q)?oO|>_S#4dA=0@XOS-llKcT4oco!+6=hxI%Qrp;F zkN2pi*FHRVS77%sK4a17&uKdqFN^N+t=CN{0+YpVpp!i*2?ms)OT zeQ&=@ZdbF|2cOuwX}Z7FkWA2_;8E0O#s1Sfh!SldvJnIV8}>m1pE3yy{&gzbO$5v* zm>1{KBNe<0(Ww0qB}1z*lNI?fLL@yD@dRd?&7ZH-2SE6Kjd4?Y^&Hz3sDoBjm-M#e zPW8#_obDfjx|+V8x;JL6vn=aA`Kr3KH7yD;qDQn6X3-TS4eE1LDiv#IwHw+tu`3rA zVktKWz1Rk2Yc_`FUQy6bi0eKm$q5Uu&V1_A4({1BsVg=rNmQx04KA5$7O-b5rPUjk z5$ezFAQjTAz^Ga#{txFW6#*rGhcGXjHmpxCKDhCEwzUZc{(zVWp%1e`7_C+#IO=$J zgEcHF)eqpH?`OkSIO#a2p|Ha| zg()Si?^aN7JR z!oBDh^4>kQur4~v<^fgd;AU*TOnyaeSH+E-$8DRuT4iqaQw1?hscgnRCV!A;QDmaL zW5}{O>ADLa87oKUx8;^5;LobUBI1RQRJfP$(d++V?29IE2fi7`twga!tp6LIu}H<3 z@hTgi{VJAo8D;9Xs1q}xgr`@{*D+&D@F#4bx0JqF7)ZH*+u-%+D7n;P;_2XBc52$a zt!j}DC8}v#gXNX2yNo_3)G)Y&C9xT53N97NwpB^Lgk6K2OiMALk?2gRzcp7(UzPHG zYSxFhYxu+0qb_1YYlfS}yf=lKS6s0rj|MyLGXVZ}>kqtyn8_!Ju;RUU25(PtN^UGm zUB%x*svcjTCY5Yn!OR@p1)t>Rf~D;1op<~>E%-OQsf{B~iW|Qhy>0Aj`1?a08>B~& zF3_rnNXfVP<*zrND$y}zP{qSDQ0OVjDcTI*Ss$|6WE4$^nwAOjXOxia?YpYu#)2xK zD=O5KOloXu@yOb_`lb8m;086ItWtj-ELTt4-!B+QkJHM~z*%KWAl^Vi1MIL)R$4RB zf>N=;23Llfh60s>^0qD=S>F5n@nAzx@e#Nj4aSxz@n$e+ zS=BuD<(+p6vgxf{I`##3Y?}VP>+6g2f)?7TW0L)o8)S)rAxTv9F)>4JWyPB|iwK!E zsS{_a+cNmb`9=syyZ|QB|HF(>E*j$MI!F|6ZI`X{Re6bR16KFYs{7DRVBn z0B454kE1BFxV_iC>n1Hlp53k9c8vqG(WY{dm1)fvn#z|oJqLPUg8;43v$+~NEluzF zb@*l~tX67FF5{|G2|!i{!Wid~Y!j>(IT z8i%B2#U)&8!#OW0=zrAUFn_T7>a<3;t%@f7e(&gFG`}0wMC1A1Er!My^?m5b4Bh;k z{~TluA8<`{{t(%|J)a28@kAv(nLR|`ggo@%?ZmpNNE4@-C(;6`5{AP%&PdxN$GxRBr}QQj*dh)b8}kBy@A9u za46Y!;`2D1D7jo+?7vbfBpjANAAz=zwwye- z-si82Z_y{R-LkN74&Z^HKEp1+Us2%2%}k<}7c_;qP5zB7%Oh*NcKI905n7XbEOT0(B||XYkR;T3m92TT0Vv5uB@0B9^JMMepi<@B@{leKsk6>-&|YX{WRF zRJ#i&mSC)I`)%KpDpkAd5O>AQW>36bMTm203jT-O2PN&-UK>#jPA@%nSOYyY`$Vwe zv%Z%C0uT*HLIqlRa(v0yrj&jR!`j~qziFPSrXJGIQ?9F2j@1b-T%fJ1L{b&1b zXdvXvhA(8G$U))ig~vu!aMn5tycs#yXwT|QNAOC^M`?~FXC9(FiCzp@f)>uWHZJ?< zF+O6}o>m#64BpX{%7QwBlwrnnBbbD|4zZc|efGDZ6Bu<58Z;#qJkgV~yj>@2|GjT! zJXQ?Y?DF;d#~AtXM8c+rj5O*aFplEd%xE0Q5tz17?Z(olTvy-p*ATX!SC+cC+-|uR z+T7OmI|y8pYp(Nm;@CL(oR14}1Y`g7Lt-AO&%Ds)>vRzt-8si~K`B$tS#qt2ly-2J z!G1Ne8;*vnLMz7AL#?SMe72>aE5XeTy5Ig@MV~QYVYrQ<01e|OT70-+(-_zRJu&86N1c; zYC_e)X72-$(4UOpz+q!2n$EnHzQE1w3QJS8m$q4-mi5x({Uu&$e!TN|jA8F!t|ZVm zI|dy?2FU5l%+K4 z6HFGk6;;SJlpS%gNxJ1!?U~pyo$d5sa~@TKetEZ_cr*@j z2!9`b4|4`SC3D+Or|qs7pc~Wto&{s2hz%EVQ=xO=F6tNP^HhP)Fd`CyFX10$28gr4fm`OL3gGtlcciB5(-QuU3C>- z`GcK=vDxLp3h^&Vf}@mWI)bxg{ux)n+{MC>8c(>)2sNNzWba=uJU<(`9lw5ct&$iN zmjr6E13CYI1_MY8KJ%=|pN~11;uA64Zg&|plp>R20=vf()McXFJ+rpGQe48BV=Hi+-XRY48vJ{_2a+x? 
zF&B+28%_!o!gglmhBRt+62Xczi0?EoPgntls(tRWV#lt`PUr0}o$ZM^!c@*GwacG8 zH7;q1BPI9o1>q)x%BW;U#Fa|b%>rq8V`T+)`PelfNwB)|=SROfY5OcLFxd`{V(m>w zFsmc)R3ACV8#(Q&ncOwo?*XsNn5wQ56*QeSw%qDgBbS$^8m4RE>WkoKJDQkqFrHVz z&UKnH9!K4}TrP>qgwluw<2E(ghSWAr56=tWE8wlJu7e=^aZrz@u61vJ*Je7+^rD&O z85h}m9TBYvGBd+A#C{0pN_R9?^|Z=qhKd{j+O@c&EcW3RgqSQdd(UvcB1r-!Yfc>F zsE{KblTU-~1YrBlAbPA|I3JVO`m20ovqzQO>S`LZ?FYSW|GGg2EsSa{Dy_;}wh>$p z$QY2KWuZMS&7u|H72XdXd)>QI>=+W6%k6H5p=aC)Rhv?{1kZ*3LDA@%?=#lFk!CfW zpW_!Z-JeK29>xY7_`~*fg~_gT-+Ah`g|18B=U&49XoLduRgT3>@+;^XM2|OTGvc(4 z$tN&&G2`TkW0_+gq9)P^W|(AGOg&9ZF5$b*K6^j>f2JxArVrFd1jgbx`_X@Q%*S}8 zZ-?UN#m@0Ee~~lhXIh~>;-oduw9l!7N7A0eNjNcAFaMKN*3ss}5pEpC1AQ&M_CVNj zzMuJY9?)+|&bg@O%pghMI4YVnn`p8>fms>{A77T~xSoxYjMbq(K5lLfMWgY!+yi}R zAufUL(PXw-JwJT^KsL3(P+E2i@!VYkrtSHVL!_R6tjo6dMmk_|g$XT{oMXf?9^>2&Uh&=d{7qorAQZJ^DY z(&Cu8d+5;{^nHG#_UCHM^N#Y>%G4#Df$uDtk;2pA8R>}RXgwzhb7Te(57iCEH>6pK^(K zowz$RA+$rv@n-q9u}-F|=*7kcZyt|9H}8voCvKgo;k8t=zWEFVF42j|6vYkvkXC1z zbxCgew|`zuKkFdtcFU6MEUAV!{|?TMJU;&8)@5OL>8aSGxxYIE$Hv3*7(pX&m<-uR zmPP1-ww0GkvT+?M^1O@IaivrSrk%J~UKRN%={Jt<<{tGk^K9Klah6x-{TSa_zE{Ex zt&!`{tZo(grmh$zR4UrMKsG`Ga6XRZ^PI^;zF=achUQpzU^eS2g5{(nw{n~gLYc5= z6Ts0DS0M8VuI;zqXd;r!-6o4VqEoQ}%T_g~lg#9@0A}8yIHr(UVBFE*LM?Fu;2Po- zRf~WiEv>5j5D*V=fCmY3WZ6sz44sPU19S?tp^F! zt=>j#2&H1(md6*WB${*7_WNcC!4~Jq4#Vg8PITQ;`iZPA!@!kIs{K_rr--!g^?2Y- ze6@f21If!d)}09S2QBudtG!Nb-VUl_7kX4j-e06qr+@6iTd7gl4R`$`n59R5e8s_B?hnK}SSn|uu|PU|fu zYG~_s*H#S)sQ_Ri@S(YdZqi^vsXXgPmV!AOa;VBlM|`GaoQWc#(u#49aC;fq8u0wL74h~`%*#|~!vKOn zZK~>zYzd=rQ#?GhjvaiRlvlku~S{ASeul#xXL{0=F`0MJ;jlY zTFqtw58O*xPatWr^?BrNr3c8iN>7OFBBPyzbyYx~M_<)3zZA86D8Tx)z~(g#+P)c7 z*{O&S>QWgQg&hhvm|-alH>{lEhE72d1r9=fc6wUFksDWx4!JUx*zhzPR<}~GmKNk3 z+Zc=Y6ByANWKT_9d)@jCw|ytaSGZ$?r#|IZ=#9Z@sif&kZM5}ei8dMd`km}wldo?# zrc>%y57&Jpo@KYIMRp>z`HM8)2s}E!+rJ8fGA%>S6^VxrRZ1MaEXyv5(GB$9N&$NY z+;QpmJNude>_eSqDmVrq`wh@sw*}(#>P8)KaMR?VhcJpH8Km;$x-_*Yf}&+o*9K=p zPpAh9#ay$C3+pQ^y*$ev{@x`_;yD(&R5~=&ajub`l^vHm;igGkiPZ4-jarg9%q!c| zYnT$3sbiFfPn)>Mp<^cPYmag6q?br9YC3#9H{3_i-_RE)&x-r861~%ynwbfC|H`cx zGL^SX2Hu$MLIIxT1nYghA}pGMa_Yn##Ol|sKm&rN^Ni%Dj{lk1TVx0~t5<=RUk+}p z`Iu%z@P!+l7*|YL%_(J$JGx9+ndsIs)UQ`vdcrNaYpg(gIN6sCo5LRNwPIlvB(R?^ zQ7B`dj};LBQjysTx%tPK+6D6v>q@=6q8{djGXIOJyEh7isyKbt&VvwC8jx z@l=qtu<3l`iM7wAcDD+}+84ZZg)3M1rnkJT(tcJji;iy_j8PaMVDaKQkfa_y1dHY5 z8s?!)orw1jFMZ}x=tJkO_cR=-?r0`C&-Ex;;v9YK;kNM%*O_nqK2kkPViY}1$mYcr zx^5}pdWUxCk)iRNw;)}%(mfCC)9#+9Uv~cFa1T6^x$x2NY1Q>uD_1+XR^UC6)5_4j z5gV797?mbz%l;DRmMCY4ZrE=i@TITdYfNnUAsd6HG`&_-bwS3x0+ zi0F0vS5-;o#B9)H(GF?MvLuzIt}0B~RQ~RGnMkWIgT)6ItHCJFVZO-CY9soyy z>I~b4ut?xya8sjgJgriNEOb=f8WHfDvBsvPW${TUFw;srs4d(Ox*Pf2Ld_oc{RA)A zoM$%2yCj=U$0NiwVK6LXo#DS~djfF4T;W`7MJEGAufMt286$6dXOfZ;6+6lxIklFf z07t~RgGaLz4QioEJ_eUcamV*SnyMV4I-G(d$AjvgRz#ss38zI0Je2a$Bl zE~?&AuB8=u;1_bgj(!32x52>GQpdBJd?l;GnZb3QLxW{i%Ke#3l-D)6Yh3;)!zTAS z?OKD1W)W?J##QH9qq?fTs=3BR>qe`_hUIE0xBklBr`YHEQ^s=9*Ny~OoeXG_V;ni@ zlc#h5v8h6at1d%cPe{6a!AJqurDM$ssm*T&3Xq#`1N0fxEYDx%vXNu|xwXMRCM=WU z(w-C7cE_~z8EoD#HQ7k;1}0f~t%^XA3y-0N6`tNva7vt!`2dx3UH#73T-aO03=#^y zN7mM;hcH0N7+=ZbNpZyc5-R#b;9qdWJQYs5^kjxiz(mNWf5OAs2@%#N}LXg&^yG#=m_ShgFgIZ z3EoW&k{9&tXl0>V*(rx zlhB%;O0cF*IYZv!?NGD*>C=4U(7ill&bwd=}A1 z+NDYFPo2cR5NJ`>TV+HezCZ7Qlih75g0qPc~gHm zQh2#7_H&Gr(WSrk)axel?rAnFHLt^(&3!*I&`qUt?B)cV;F|}Yyn;RunfL}GF=9yb z0;gFnxUHm1$3CLwFderw(!?%37~<7lm$S}EX?VOwaBX!zL0%3zTaNWH^JS-f9k=r$ z&uQ8ZZudCp`nt2NKW;Qi7-`uzmS;4@zFf-CZ`5oV#!P==*k%mzj4{Ft+W%?~_|ymr z75xckz)Jih?iLL7r})PO8oHBAfkFxpVD54C>pMi2B+WGZ)!fkV=S@+arrx;KFUVNQ^MjiLgu+((RMLv?w8Mj z9w6<;2OEByI<6;F$@io?R@y(>hb7nzlQcK(rH4G*~9T 
z<#|f~aV1?Peo;F$bZ`3M+WZ-9L#PyF)+w#_=%g1FyqS7!H*+o4pd#0)%~jLfW8P1P z^$OcOsu(A&{TXyxAe_y>`pr_%PRW3i`JURxSfN)TlrKw%oX(3r#5P;&O@qITIH4a@ zm|fawBN&ygK~ht@z38czYzrz=^hzX;(IFG^HCW+n)uz^pI!=Mu`E1pOwu;tHf%s51 zV&AL}pNeoAH0^5_SJqupQWm?Az)w2bpWllfxZ~YS8Sz8$UAHHsSlBb@bfefH;R3h` zij{QoMxWPgw6-;{^}dDEqc~5Q@tYv4)~|Z_7ah^|8KPH;tQeVI4SH)+p{oZ- zR}815Z0ab&BPIqBnrTL(Q!IUR%oV98`XLCdah;qTAK5P2=Qu^q1G(+ktk!kXv7evJ z)qSD-$Y}|DpZ4>+Ag7g|Iz{o$LT>{d*un+kxvp%)g7Gw4dl&92?r$}`yyQ%TO@iw@ zMPy8J&fO4_M>Ur;8Y^)IkN>ZoB5izT`dwYf&6+v# z#UEx;alCI=gn6!~GZ(w!TNAVxZ1REdz z7k}8+ct5@?xe=~IIf$)iMs_&-(!HO!MtcP1vl@$#z0T|fD#m@2?UTmAR}m%TH#T-F z0^RBw)XfEZrmULn5b>@}3v@_MKk8V?fK?Zq;Cicx%`4sig^PVdwz0hQ{gCzq(co3% z<#h6=wBP5$8&#M-Ugw)#T6y=bG|bESeqrO zFT0rBR?6wD1aMMaSxo78i#jru^5e>e$kHDiQ*zZ`4a$YmQX9*E3R+#3Nkn|B&r5*dUWi|D;B3w3Cj}wddcs5 zL@XHBwszuA-5~~6=7Y>XU$RmZ@1F2(>FDEL_FV47f01QkQMpmrqk&j3nXObw^yIEZ zwa%`{8BwVkx4qv*KoM#`4}@zD)1IzZaTRnvE$0p!nZyVWW}7Us+6%JR`mq<-Mp|t9f{^QAkD}`52i!X z(ZedqVHW0(X%)QK$M{%YicWR<*Ks2mOa&7}b{*{RWgb-3ks2z+WARf}R^iz8n*DW_ zEViqOVzN{1--k>5N*vABwJRB4d*JoMO%Q7+PfyZgDA?e4uoh=9+ zB;c1GR!8tfWf1jL9ytzbIzf7LNZ>CWtRRcQ9g>!TQylnTE=P^rhVLNo!^1u=rb4@h z*aT>B5$lUdLq5$jPeOD}{E65g{=YP^5EwX6jF7`565YfKAJi9;qA1Q&4u0BTj1g#M zLdD#za-!tBvfUMO%!scgG<+8i-`ZoQTk-${Rq>T2>|<%G&qc{ehqR3>HJY0mKhBdB z)(e~|g*8zdhq5^dLtfYsJx~$iDNRb9(#VW}2`f@Pa!stXWLq{pTBPfbNs~u>Oaipv3=J$v>5DT%hR5)Qy@e0KdP5mZo1~eX@ct^O z;r>z_zP_9^1p5Hy`q3YwjyiQRmv}(~2#?S|U0@h(ggqP6QxZ)3v%E`k!W>wqww?T6 zya_V-YqL<3qW{@agiD%#?H81vfraeIQ)*ItM1Sqqwo=1<=)?2ZxStUJY|(!%aejD@ zZ2I881H-Tvt^Era{vY2x_V@cBxj1?L3v8>W6E#APUCv;pVn()>UwV3$65gPe7! zsrm&n;}Fq{+k4S9l0i-F-%mc9Xr*DsF4|!2R?560xC+f;oDMy`_$)8t{rcC!-P!3v z&Qj>-?bF2zKcU+->|f2Jqw}+)2Wj`Ww6;&%i=lRxzo8fVHzLjV=ciZo`@X*otfYFz zWTJ~cx7B5QHyD<1^fZopw!z@1=;thKT5Q9uzDxAjBzzDM*bTmKW^JWLZYgcf`})L= z8iwt&ajuGAy!tNkYj&Yt1Z~UUZmjY4WNTIjbJ;1*!$BpQ>hxCaLki8xn}8Q;C5^;0c~2XaHy~M?YS{6-o|#98Ls%hI z2o*jPwhVn<*(0VYU0g*NImek%ichpKend;ro~<88(|w9@({y~r*f9d&q&ZY zlcdsKU)%fqSUYw;LSt-k{Lmge@kA^vW`L1TTu)SQpmIF$ya=9af;B0;idXhyiRti6 z$x~`FXDYj9(bq9bp@&8u11H3tDXSc4sZ|eh?6y;4gZRqxJ?R23jK8oXk-VhP?5{9b zd$itFK14KN%E3HM_T2x26DN^8xn{|lB_*_r$1pl@wW=ePpU34}s8+_inwf^gE#2pr zi-l$ErYOR#L-tgCKRvupKdCdmT~Lk|wj!d`Up#-n9aJB&o};NrGD4HV+{?O{%52)h zS{_7)bnNovO=ande#dCyW?K@)L%KOa+0~iqr!(D;2x+uLF@n%3Peq-?wcz=w126+# z73YScl%!Wd7b-};=aD`a>CQExL{NAtj+pyyH`19~sO_T+?WuGdg1at_l+}#qQ^rpS zc&wrjNTVvVwqFO!JFbTeSV?3-LPy!Dc`LHdZGyCY@q=hBx#*vFtOOyE@-Gi}A!8z0 zHS_2jCK7}p3>iMd&u>$NW+zI2yc2rGziS!pF`)9p!8DG!;AP&a87HP-r}0|5B|(_L z7kemFLNKU9$PjJxr9p(;p<9Uj_Lqjw?XTH~E9Qyb_kL1k<5mSdq*0}hcpXtv6CS)( z6JnM?_#jQQ5I5YW((P*##FU(=a`nJ2{H4m=2V^8pD#UDFIHp7-mJjty%xx;k>XoGp zolvPZ_vl1RWX>=uDs`VBGk69p&d#o|{>H9UZPA!9U# zPyThQfQ{nLn@tw2KZkDggl^1zItnYJ$C%f06NHcXg$yG;1WGfOzwdvnnoG;4x=S#f_QUFvO9B6Mwu6MYiHY<=c8L=1 zTrFo#eXvi>K#G+etsByV$)1w{Z*=*e^0SOFC5=uKTW##Th={K;XlKUDoK)Y%9UP}O zdmpT0=WyvVuq!Y~9FVSaT*ipF^2Dj03(P#~U_~f7HGiFvt)!OfvT2j>B`#m_Q1lvO zY*yti4!^fkD{JQbnI+rl7}5GJQhdn#qW#OZMlUf-0o5I(yJik8nv zSjy2ME#HajQnOK8WzQBknaQ`tdx~jiRn9Y6ieh6)9Yv2ZJooqJ@{h!!#foIrt=)Z2 z`OAw~ihuUt#D5~T9HK+=Pzi-gZ1LS9&YyLJ?aje^!Rk{s-MxF8)ziUeaRJ z60H>uCfmhL{1*9^uC;`woykZhBbTR3%Tpmg`FVEP_qRoy7qVclC=ECj#Y>#yV1AOi58!t8%87ghc(TA3^KTnAUMAb z7%#yg=H_EdJJ03Zgnv7WZX0>drObPHg0XKImVDC^ET(xet`nfMq6l6m+F(M_OA?an z!#TT4_S=}H;>FY7B|iEjh6*C=Hg_k}!^T!WPJh-v9ewu>DeW2Z-MnLtY2!!s>15~H zrq2d8-?bZg+>o9gFMP&ZxL~Si7Zx1kxD99giU*(7uR(q_l1pVbt{7JIE9ZxL9wOJl zll$(rNt@+kNk;Q@reIOWM_*BVDWeHdA+~BoA|mtU?L29*t`p~R2_RK@;;N|{ZYz6` zLN)EGfpI%eB(yR4OYEhK%fj|NKh2y_aqz=y2em^;xU?9y)WBE>xv9NsdeGN6(#)^H znJc?2JUG>a$f+D}=DdV-t#p!b(3kvzttQT$zQ(u~2o)^bAbuRO-V1E(MBh@xeKKF& 
zw(y3A@j)bM?>VA*H6yn~9*&2!O9~v1VezZrmCK=g?SSB0C2gl-kvxsw#8hi6Y4}%k z4>xUCue8F|)(6&FR{WnF%x9#OOJqv-ePo8%u`!uXx=o&8C1ggDR-Ymll+2VpkClt` zv>jl4)MfFQkG?n{&TAhlAONX|;U;ZGKAJDAsfGAm%4%Q?fea}D+S{dHg`Oo?b&jnm z#7WN}S9fp4_IY7JC8hNij@=edKQdV1%xI5>?cG%=ocPSQZYo%W?Wk~JcSRA5V_b9!_}_^>0*32}wmj8YGnNMnXhNx+J6pBn0U;=nx4(zDNjCN(s`T zbR(&RARVHlG`!Ekz0W!O{N6jp9pk?Dj>{i=?5})TYt6Z4Jo9-z;rXFj^`{mY8%-kj z#X?hApL0njW-m=#9%`!H%TCm7=DqBmd1E~~&m~{1tg^}a&W(}iO6@0a$zBY6tZq?g z{rR(kl45CVy1*gqcB(HkX#_1(*G)U&Unr%^Vi8}m#gdAW{10ewKJ-0#JWtsh5{r+u zx+PtHo*}(!6|hB7fYR^>_v7z}WgP)9U|eHO=?8!SjDvO(TE)cjx>cvYO@x=kC9( zrondm)N}j)me;hf2+zO0riFP01^)r{`7wc~rRDyLYhO|8`qOi__&mQP4Kc|U((??s zExMI8QoOLvRbpNxzu7977S*yV7xIDp_3oL+giX@xq2KNmCfbp_<{7DoXjupoY`NUr zs;+h6;>Bi`M`5lD-}n|h>IVtk-SWb`uG=41yZK6m&yD^ZE!*)wIXS6axpSZ`2>#Ol z`iZ&hzvJ#d0zmk#uCArb&9TX29oJ4G_&sKA_A4(y_4leqeyQ;UWzCXh9R~QpHM1Kj z(1!4$!Q^Cr#Q$W+Uw1-B4t_o^(4MT*YTvB9yRlk6dHmD8UUTB!=`Rq%cb=@_8qil& z^L5^72~orK)twS0eXEvt$>J21lsrvY;Gbf+$c2;5$15J>U49l464LeqKC($knYdmi zf!`)kqFZnJZV&0g|L5I#j(j1LTo~O96ea)Tqa{$+^Df6Czm0(3=A@-vclAE~a{_eh zyPyQ0=b(W!!^Lz8zhd%mUMTNQJo05Z__9N56!pJ9MmCVHAdgRprTX}A<3+vJ#0TW# zn}!CPXF!r4vR8sn7P$B*tV-7v9boop3vnTh@WMy6BndyrSf0NE_tA(B91yndL8(uG zZMB@-GLIAGa-`5VwTVy2$1wPK6|`Ag@xaxOn60J3CI>i~`3|PYBQ}!@Uv@#7lb2`J zlUY?|@YlD7OB+Jn!43ff9Z1To930IJ<&%CZ{0pG^S-7bWT=q989`hhgneec%jaqI4 z3h~PVws9-Vy(wZNt{vc-vAy_NmU(fsYy~uZc}hS)A}41NNLzK6lxu)qh*eIHv?5o4 z!|4@pV}VFQ4ph_uV4GgaG<7)VXAOcUz_#0CH>sog1(u`(E%z4Ee7PcYuzJB=i-t2ZFHv1?#U|ppEXRR|@OGjJVsNRnnz3y!sshp1GyVD*-0}v;Su96aN z#5uUBsR@wOU#Np=LAEa}5Lh7qC2nqM0l_?kePXUtV3?GW5~<4*3@*10E)7l~q@g(u zB9odjGBQ8_1fOGIzVlcCnFC1C&_Q29h#855*ROs95e3sEKhU=@s(M%~frA1pcnik* z`Z@r5!PmJ|EXvvqTqgJc`CkjX_BwnI*X`w@wp4{8y%b=vPI2f_vlH`)cy45NZN4l2 z_G#RLeC3);qXhtjn-jqo)-(v%0?%;3zLV$Ltg>>@dg2~{xs4{bVKOi@y~)Z-Pgv^* zU58T2#j*0jPrpwGhbr0vctX!|zede~DOs6v1&fx(?S;Y4K~>tNJ8jaWrwXU|-* z;GUIex>{ogq7%S2AQ1+d(m^{Z{8!9ti&cJ_17t96tE$38UjYS#XIeZYd^dAsr{t^} z-_E|>2Sn?$X#hDpx?uDG?D_8JkZLf``E(|One54n;X;$>X8MDq^2(HDG6hb&1n?NH%X)4Wlgp>{nZW6B$wz3RB?m-0B3JgAx-M6T}2Dd^}hWe!|cK z_B`gBO0Vr@z&jIDmNnS8i)7Ihf+ERxLdo4UAjv(i2Cv#E93T`0H-v7nw#pC-7gr!I z!2>h(3K8eLd9R5aRya=9A;Pfn@$@ek_@`GrY(Vw_ELvKMs1yb}0y2escAfNrKtw#x z7U%-+cK>mK!PFs0+OJ(j%Z#}BP)@a_VXpH9JX!cx#fzq9aXMmIKYtsM2*=#znFszq z`EbE+IioV4_ovHAu-kc->94@U0rK81$S|3TKc@+r<4&V$eJ|2u=uLo9%^n1Dby`YI zLUkYkjd)0d$h|%~3bw#aXoN>D=+!)U0K?Y#B!)?dy?I_(j_oEJk4BU~nnp3|%1B3m zc3&aoH-FI zA6Qw<8wG>F@Le4e;kjexk%mg}WMo(8Jny@;NE}4`9+(HALnF7N6C)LJF;&Zd$xOxh zIQToW?*PSl!^F7p7A%4Es(2{pH{>4)8=Vb_)kzzH4ugY(qu>C4D=i$SIcqT0lnAUx z0Wx7k@@BRe(ctia4Mo9=-08A#47q(2V@`;Z!IZM9Dvg-iTftON#%>R=5Q#fWy4V@e zMoCL+0_NNqzn2EAL1W<+`T_v!d4y^%#oB)@xe*o8;hGH%c>C^M^KzSOI#9yv!Px?9 zEpN=doz3;|lD<2vX(4aPbE(m^hboXI+bRvS2fj8_57PW@t(ecq>->&HgFrlvRp|2> z8O9dID-1u{U4*4}r_hsUIB9N$ss@rS5$E>K3s*xBjT7Z39~17FhMhz=e2%Xet0)#(e6{&tNkr%SU9pK%cm4GCdw+S{N~ zoo#{Ts-0epsS!ZzNBM18&VH3V3im>MMv0~Bfvu0IkYE;x{WN%v2}_Mludwq~^f#au z#ZJcJxQ=ON3;d1&(usAGh`9HAUZW~2s&F5m zGV&Q8br#`6WCwGB$u0g6hcf(ICOLX``Ztf6^Z)+%04P2ARi5yuA%o^q{5?&fr2pPk zPxo0EhFbsHXR&|-uKgd|@(U9`eb=$m>4}D(O+3qz1Q!ZvgKah7SKWcCYyRo4&Y@d3 z9L)>Wt54)1kH-9|fkIC_NfrKg_DLWgRKpB1t0a&|P!0~{Q#*#OKgU5CnNfUZQ}`j^ z{A%E;bb6r+|GR)B*#>A0d1I#m%pe3Xz?@QXML+#MHM;cx3KT|CPxex^%V}UuR3}Ol z_bgfdLFNNo$uY1@A5ysq&vAhYrkGZ1`P~xVgWr=U$2)nKBjI^i+Yo^r*C;%+=H}+c zp!1r40x#($a4a$|x*NEB-P+j1|44vqgo%M+&Y;Y82vqh72tWb}W_u$`#qX;@JXkZ9 zXv(2W7ZVcN-tF%0!qX@ND;sO6W^X<@?@mxih>SWYe~gKi+Ks4Q4i(=6n;7|ACZE*ZygdcvNsc-2S7YpTB?q4si$2tkqKZ{pUf%0`z~Rzs{;&iA%UCh777l zN?KYM7!1KHZ}S&AFF{8TNd{`vXcc)f3oGb^;^Jb#Ru^7AUS0^N(nZzxG3J!P=V!qu 
z<50X28Kf36K(9>b6nOpmHF(bIYiMMM-q=5HDc?SaFE;$NH0NqDOz3(#Aa4Tc6gv0XwQJhgEoT}G?iv-z=Usz%`0Y}+(4>|T zMn)QaJt)a@FlJJl8WcM18I&%fubV zvkP`@dTCp~2ah{w@mz47xUuWjZ2UOw1ya&j6v_uSu%VBA*N29N^ja5Qd8^*BgW>e% zbN`#W^GqApy)(Q)Ht`edm**|fw1pD1#;)6>#Oi9!iph`Qb{u=!TVNgyktu7tU*28c zIYre~_wUfLKC^=MxK^qREYHh8HAh!Z@5?98Ui_kA82#luE+oV;QsS8^AG>a4#=LJ= zNCvTV$KnWUK|S|&lz2HjXWiOMK}tu(Dv9Guy6RSa8N5)Ymnh`cPv2BjRDgr+8F>M_>nUkgT0#VH z=uzSufNsg`qkrM>NZY@%WKWGx)fEP1Iy;?>Cbx?QE11G=<8jpxk&?cDxYQqTUs&hq zzBEUA!EPE8%+X5?$dk2iK&0^)V+ewwGY;?t0~w=rHrRaA*{6_;Jb&7bGYy^E(a{m$ zC^r1q;Cc4P%_dP|^?SUmNS~JEefjcbad8m{xs;`=dc6>`s+s2qovu|J-F>Hb$n*OB z!)Eb=xkL`nP~@;7NNVD^do1T>YrVMgB<(}isO)AQ{%_GMGBqQZI(SG|EHVDnj$3mt zgxMJ6p9+p2u;h__@p+3O5aczm%pHO(Kr1vnJUlfAxpV0~>YKa~AFhYzaNNebTd2v2 z;hjQkz>Yi|VTG=GPhl-Cb5vMZM_b#y;_g<({(%8??~g<6j0_CMetVW%xU;kCGp)bI zD>p$7EbkicY#LlI$G;S96ph1dbtUlYN*CRr7hF*oxng;RE)V+T&Q)HT*y5y@Sli6O zLXR!$Ip&&nVn96R{pap8?*cwNa}4>|AAZ>%%p1JJ<p@vc+AflHy?uHi(&GQ+y48p%g*{uusYtK|7H^$9NgN}1UiG7uG;Fa z`$X?CR?0L|ep%`2^{V9zP)-!Q!D_AgQ@iW-@2du}H{|8r7W+&gcFF6sBnCwodEX43 ziw;4rOd0Jbsswta6DbU^?JcsR{eFvXzBew!y8z29aY<6F_r*>^XOF z=xND?!R+oPY>~O0OIo@lp6O+A0!1U3n8{^cM%NR??Zf)IQ1G56;ha?Z8?;pIChLwM zg|{YMCm;FOvpZYQrj*(5*pc;ZtKH zFrnX7sU?8GPnf}tiL2vSwHijZW}hw-n4KGnS2WJ~NjKlGH(CQk_kl!U48kF@RDsl! z$_MMgQ+-Kr^4FbFPBO5iEM|gequZ470_?UxMK4>SysHlrR7dUYQt_1DhHPe z;UF$AvT7>UBoOGjSzF zMT==*yqo*LkGy;f95{HaAY_3yG+HF?Lgs;7Q(c|sZbJYpEpW!A5`Cyp{}$wxAvOkQ zM_@L>X=4Bm(7raU&pJ>lKUW9Qwnm)f<>m7@qb$oDCyl=gF2*k}(vl5VP za2)$^eu;;Ri;%Vz)SyCMlJfyaxhBuQEt-7=yfi^8zWL0&+MPQq=9)pO^Y?Y4Ssve- zieq5}?sh&rQMyB!mmvMnkSy}rgVc<2k#Ufb$hDKHbWTpPX$`VEJU)lJ4{%VARe%%IwdBr~O~w@X9x^(h90^VhdEw^zId=w&YddPiz|%U~)ZDyn6NYpA*SE#ww5f+&P@ZT|B`wCpU{FFR7gFoG~F>5Tpb zMjm}HFHwzq`uY^|Af8McxCxuIbLZZl(BtL2yFNB`IBjdSLvULB&)Fu znlU+jup1#b`Bv?I+w}o^KBU3&tw}YEB>pkcYVAUONbi%s=FU%^C9&~Hf z)&?ER<_6FmBf5A|01Q4Uo?)9fj`ZW6>9uCG&%5N}xz?7~{ z$zvuv)el5#9O_>VjV28g=A}W{$iLOE#Ffh#-kcsZOZ2F0^;Q!W1DC7*G4yS00*~f8V3h&4 z+@oJaLxY2KFQ@q~HG=p}JPRWn<3Mjf<24&vhCw6wK7U)7J`h`+a;zpt9|>~m>D z5`;We>|`%qy^8X37|M(9@BS#kGo>2IwYX@N@2?9_g8Bn`_at5{=j{)PkNhdDd@jjLQkJ8__Ucf zdwH4ZHj@(^K4^uVuDj~969`0jSxsd!t4&Dbo8F6*czEuII7mwoeWBP#Qb>@yt@b@| zQ{vQe#kRqre z()v##)&5f)WF0<|6blnNf_UPcdOry)I6;wJzP#d{Tg32zrT#2EY!N&yJJ)bQ7!q{A z!6;euFafDa||e#*o5*L9}(W%AqDxZcxHkN`8c!L0<~##&u9Q zc~Q+FKKM$f(Q9-IQ6aFl^PWGLH`LHzuta1dibWPpmItz<4bi!(FMTdHzj+~TkCq>X z^Xhozd%-c#`yY%DS3QKYv1Ct^y^-h8BVXyrmVT>Ft-@SAW*0BLRtO{^k!5!RfM# z!OT(*F9}f1_i5aDMQf;JH$)H+?U+3CnE}>YVNfLkpJDr`R^9_i?AJ<*3H{$pFC5`A zSbz;mTK0EM8jj_Og0W|1KZgEja{KiyGDI78Ay8YrU-_lpG-BXeBj2Qyd~&M2K>$V%O#jiP z++WXy5WQr3{HsKhUoAagqtm;@Z=$1ChQU0&8`a|H(0L)*(K|n;3a%!Y1+JwAopURhz2amjTaT zKY4G}E#qn`Fvqc&v#5ZMh2O9R($~VEwaMI4pV&BF>B1AJsjaK4E47+tCAQU8W{#nY z{S57OAUCaYP*&Q9&soKBU}FjVBO3JIkXgbDL-Mi5;YavE>+b~F*l-+ggSHz8G)}}A zL!@U>xvTs+?^7R1RaTKB`OW%Ai)9i%w{rIr&7x<~;sx&(ryqCeXGQq(xhRybT*8kC1Zo#o~hx=EY(KRWURn zkx-(rBj*Ew2jw*XQUWiw56m3I^p9+BjuJZ*U+8t6b7T8LLTm=o#e!x{=Q?*pzsCti zQ;GD$;g5f^ArO5%{jT9Q!&=SB@qq_Uci7pj0*_!Md6O_*nt%WPrSDbmKQ%3KcrbdI zySe=a3kRl@zMb2w)mM14R}^i;v3uryScQG*%yb`%q)n7r(${53Lz&<$rs|KMMc+H~ zciS4iHT+yPAGBfHkNym@L;K10ed?cuX3eRqg;%|6H3>__B>5qZjf(NW*}TcTV<(i6 zEkV}4G#EV_iI^Rl6n`dSrN`w4ZP=PpB<-#RX?MkE1M(XO=R$_bA7AY1>=b)2jgfc5 z_|u07jr8wGtaer__kPcYcL@XrgKtWuL}o&-ATn~^XV~yXahO|}ky%m`W>YkscyU=x z>@jnhsk?X+{(#TUgnPjpJP`5hf}P_;m8ClXa+ay^p38p}%_vw)5&#E?E~5DN{wIE2 zwD$C=yg3$otW5@x3rwplrZ+!^<|S~3g_Vf(;8X=+4>~D1xcfa=C6^vKs{=}#Hhocg zxQb6zv|q(Wn7wWfBBXV2IZR0SaR)S#id{jT5<5KM-D{A!-26E9dH>!3nD9h^s-iIz zJJe);G^}%$df>(Nt-6hF7-e8C>h`fYWzgJ@3O2GUTNot!vT7&rc5-nmdyt2O_TxR` 
zh>!lo=;acg^5us*c%QNtx+}pRXI?n`XuXxT)1_uLsv$Jckov1g=TFSBb5&%+nAP_p zUP#V#Mq#FIC$56XNT0-7dO(WEw~*QaSdNSi9w&sT-54FY{!mJ!XFk1Pobv{A;~kK1 zG2Jt^cDa?jct3~AM=gVq-q=iuzrc(5B6oy;u~3l+&0Z8iz>Q9nk$b#@JF$bi&N-32c^AD>v(BdP04+-A!Jgg%VPCiVZ+7D#4mYpR&QiUQOSJ6E+ ziqP+1bmVEpPG!P%!COghL6GMfB9d4W7Uu^=H~h{luF>?nKD*Q23|1kvLi_bP>LDwQ+7ymEfx8MnGVsdW}Ah81II7Z1-3D+wYS-uzwm1 zrmdVw#3ELG^Rezia^{cxtj*Ud_E8)+esi2dhj=le(S*DJbQj-GhrgRxzi#y-gz1(U zOMvRrTI^AGH~~68Z(bz9((Sb&IKmahRMofqFyqjI$BzHB}1wU zjy}3DY^dovym)jQH*R>_S@TNmr+Wp~{d2DkKR7%jwBVb`yQHppkPsGAGVrX<9klaD zjIzdzo{Xhj6cuL(>!P4I6Zjq{RgEEsFFSZ!=CY~OYx@r*sih5HX9+P!2%DgNWcU$WjS?n$fT^ zW1>f$Jfmxw#hk)v?sUZe0=+GVie1TXxzk=GOI?lc+{u^nFI1Pl+D1MiWEP2{&sOMT zW$gzE!r9d?o3Gt%Qq?wM(t|Uj{3~NJzX#VgVl1uR+bQza=S7SKc$|f zA2@X))2qABA93X=Ja(qQC*wFEd&shfBf^e%5SYN0KGrk9JCXKu37NhfFMJ;|z58uA zCIjH=NLBcc2iJlBSLZ8E9f#Y21FQ2WFf^(6KMys_@;w(t>J+L}6W(txsKcooKQ{(b z4u4u2WD(__pBE%fcfa-2Bj;MV(#K<>JtjFX>O+7P44C-Vf)k|(RXPt$Na49@i=|Cx zj3p>E3f?Sqn$WJAxKS~opZ?fpVa_u2*iQ!M$7&2ez9V|0hn@iK?c9R{WEO_la2I)Q zS{?O+R&cshRskXEixQpFsD$6YUfJt_&;gStm|H5xt9N7csxxT}-H*X- z-9f{jbgck~_BwgwRDK7;0NbQ#4O5ydPg@I)I^P~OL?D$jJ5O1$#Mdf&Ak+MnN>{?( z{kjWXfw}lRY^O3wGAu!7#hT@g%SC_|7bCf*@VB`8_9lFW{!9~&Q zpz2xLR00~zV4$|-KmX;M!c?gkXIoRkBMGS%bEBf^FM^gTpKpVp^<%E&{=y=g^6NMg zZZtQJKI!dWe29y>ho&6xq0hFrjH3oibohqw+hlS2lzA(*%Kg{RnJ7$J`!A2%`_;9c%!1zHVCd>XuwoD-6kwnBW$1Sc$f3w3 zB1AUubTVOksb16nhc*LbAq_S=o3bCb(TfWT2;$kZr=I@6=quT8$Ti}4OnnY5dm&nVBum&g0(Rxu z!I~OfxR!((O$YrY7vrjjw-Wx$IiuzMn!sMoy@E(fQ>Bkr(_At#un4F^f@EW6lSICx z5%pxXXFgar5mBUzt^8(|s}jngUQj)}gMB{&B-$JJA|2lr3C@%E zw8#wZ79szoL6|`hFl|bDouZHGUM`$P2?I&TMgTL=H|^X>LIzTG!HXdjW?4n-BA3I< zWNm&;)^inOuw`K?E`Hc#UAibgP)O%B7{{jB$C{)3^%*F7*^Xc;jKE8*C~gRi`4VWp z5PJR)d^|qY5twra^x{=g5nQfb=+-CrVSW;=>%FyTx=HVpv8IPt;|?;ayIhnLt<>R7 zk70a&PtF~mzUodpC42S`ktj|OoRe%^9_rfTJ&z`=!MhT7peQ~PKbH06?o{a#FN*;c z$J1W{_bxq5hIpP_xAql!_DVtjD(9!w9$OF%{Ze~ZQI5D$y=R5xLdg?1yyOWIn}Oko z(-pM*mMo1%;Mms)Wi<(B$Ba;#x#6n$=d9W=^_ z*+Ek7iqaPBjVyssH|bw>5r#~6L1c{P$*w{(C#A_))`QPd#M`S;-1l7!(IVsH7gT)L zK4NQpXHBMhbz{3DMlaWNmCc$ri|YC=6uh=9WuHQPZuD|&UbyR8cDtHu3A^u9lS1;0 zncrUFm@_P;U(IU9jh@O&ub4l`u}`8Ap?!a2_4`UXN~HNR55<0>GFNx-d7YI7=g$6D z65f<}sE@l;J#@=wAR)VP;T+q=Z_`zUEv>;Tj3aBcx2-~^Z;8yIdEYm4UX+!eHkYxw z_Yj$hGcS$lk8qb$8$(n0`YR$vYbX-&sn?WXR0KhzNZB9*%p~*?2^6xXd zmrO+F;;MpA=cNS4@Z|j&L8q$U*L^;I)V;qqOUZpVV72`nRoyT7t(U)*WIZlD!XDNt zpu7l&SLdNXEN^uYl5iEAu9vb&T=#B|&qpica5vw(quL7^{n3R?ZmsXa zH|}CwprIzQk(SbjR6PFO>F3W$R_kf>mRftLHTgfvT4pMzM(D;zqt4xSU*xFab;ki{jApUXWb z8-yQY*ST8fE29stV`xW8vXXwR{0aPhxqYmW+LCp>#uZKs8TyzxYDg_O=9ZHmCx{9p{YSQ$qiKmIgRTtZ+aaq4{bBOc(vAfF|o~3em=PH)4_wW1C($b(>2(~Gg#oDJCSBn*# z&y!GKL(b;^GM&W!OytP2w$x_ExOp7Hr}x&khb)VQ2a85IKBND4CaGWMs*3@y{V0@X zggBh)APbk*Y4#w;wCqj>cHT-*4U!Z}uhMA88={swE6bNXVBd_Z5BWai`#2~lfW8+v z7V8=GftsU%MDnx$P8>H@bS-U7$!W8r1nr?Vkc`m-c^^nUb-JXsokMb}BEQH>9xw_> z3{f``L4_2#UmT9sytwnM0_5u;+KN)R|a_>Ww6$bj07FO(?j$6{Q}6@MBGwP z*j>&q9FMY*w97`iZT=0W>ir9&jR|a1YUMemSgN*^|8aROTfcd5)uAbwhF1ltilgB> zj^}A54e?Nrv-Gf8pN3RvGaU1uOlL@A?t`Fz z_qz8wk|5qd`RD%6{Ari~pNqrSlIHo{^V0aKA}&?{urZjb0Z)fZCO&Y66!ivZ-Uj0Y z)Jz~=aJh90M=}nCBOeP@McJs@1<8z-J}5ME7*&y7>*DLpfqZz<<|Ph+y!_yQyqMZ~ ziynaTv_~7+mLiBU#%Swu5x=i$Xx5-#eAr=YXgCh}5a^&1qFc#=P#IFeLqT?X70zi7 z9u%L}NiMqDjczUpI%K}du6j%^1BpnaPQ%78;?fcA(D}FoLBpu20YnF~0(TKRNI0{z zv$MxFK%LwfkX_&tVX9c;1xnW=JSujregC=-RC!^PfRd}a5d&UFPft`tgasrUHUB-X zn%G|^h}hZL!OKoy*9RxF2`D8HQ1Hs(q5fc&cDby^Kr@xosOs)Mf~!atT!jD-6k%H} zpkU-oJ@7;xKlfk59l(i)2m339NQI^L_o!XYSWyvo{8;+UJiCzp?6oGjXJ21F6G6h% z+M~tKuSZ>kp^@a5E?t`Ah7v`DAxH#M5y!_sg{53dL_Uvprch1kPw)AkM=DiBVIV5{ zSD}iMh9;^?yW+f8R@L)1m@EGp%w1VFNt*xyg_yyaI{LP;ry5BRxH{<@8F8QkrmoIq 
zAxbc5g?#eq-9N8-zX5^1!AY<;iSMRsIy>fesDZ%dca_UR8Uz2?XAhyz;?H&eX*CLs z<=A&FUKc5~sihiByzJAwEnzt$Z|h~Cq>t`xq*kP#KY}TzX3^89oD9=2Rnkeu!UE|$ zh(=yl8Il!4Hhz%BR{JyaqLGJ(2LM$Z0V7eBhZH7Di47E8t#QczE)T|9BUgb@^hg@E zfn%FlXS$z&;ZQf?48N{2s_f%2F#kmXs`Zdo$@=RGG4Yc7PzwH2o=g^z1vE;9E)EtX z&E+mvewcuis`e$0MQtsu(nI+LE}X`VFX=HtdRCfjgQZ&x+m=jl_;pzyfY}%C|=)VVP}u9X6V9MHQNyDc^OgLQ_4QRg4U*$ z#MdxIR&X?jD-1MIa zheBsklD4-|LlTn4e)VbbP+8xkdu4TO)LhZBDX40`j!~| z6#*N+Q8!UA#}llgNW0iaHhes8UC5WLwuscSMUn(loC?hZLu4Og6DHIunuq4Nt_%Yj z1M%k2z9h(MX<;D;P|y(^wy&z|tLW+?5)w%5W8B~l%`%lh^#09jZ9>v^{y*Q3BuVFN zi!!pdqm+{!V4cBI^^S*(;n&7>A)69N((c2o$it(t07oD-#}9WayasS6jquucB}ohe zzf!6#jA}A+Q{ko_{BZ4fdbf~I2G3gcjS2!AP_5UMcBy!E%RL`Ld5#a5`slV@ZC3Yj z7HI&*b2x6}nQpk5Uh~tocpg2gnU`hYP4t&?Z5x^HR=*3iVWfNPg9i7Xt$LI}2h-F+Nwlvl4^Q`h}4RjCW09JJ29uJ+PfWbiXw<+=jM z?XfgKEPyddcolAyl2B*}YXE)+wA8|0K@D&;~=S{VQBb6OBc{%9cW1Jvkiii)%p&nQM#%cU*6^5kdUq`bns?l zWxDx##RtG}3|wdep739j2C&Jj8$FCM6);d8V17zxkq68EI>1-JKg#FKfI*Rxl9DWZ z%g3jHjhOgp95d2`xcaa*H`fE^2~1vaJe=|5bFc@4boc<#QUD7UJU~;s%M1*C@Bt!u ztYKKh3Zp$DqIBIoI5ec~ay@h?GjXZyP{+J*W1U$ut}JOvlIg@8l_>2axJ2C=4~$?E zo6;_dm#rcIR=r}cC;_~Ly-#-Y7Nbng4Ep(iWlDFGU>Vl!$>g&KfKR3{ zycyi%y8`$#=NCl1nE)ResOW}1m7{4p;_a2#F$BDMSt?F+S)-_#ihYd|yqA})Y{>mQ z0Wx>T6W0>3OxpejB^4Fqb}Y9U=12j~m$O%j^!O~}(XSuucRnb)isKi75PU~oyCX+# zz7+#Q$pvJD%BcVpXY==%qe-=!imNJq0HA8XCeJJqumr@!`lJKQ>ZH)V8VF&a|9t0@ zKo#C7UzA%AuY;C?0z70^5Zu;ER+TM+7Rx!?Z{^`p(fwe@3ld{crOXo*`2+TH*n;J0 zE$AV82~t!HCn0~DHi3jZo=qu8&S#~W?xDi-FiWDyh^uC2mDXHTYwi+$hY zb&Y}j(X%v=eMi{ZEL4v(A&OVQ6d)?iuU{fp)Oj=uID8uyaYYGn@x$#Aliv1rHcn1K zervO1y$-v=+Mp{P=F93>#|l?qr;d60RIDvN7B=jazI1fwyUxzeetyT$wwM0F#Pb1r zcO4R~!4`_ge=mg&zb!*s_YC|IaT!sihsHr9h)dl{=c?V(fDQ%IF*QBC3TPpa4gi{2 zRZ>E7s{~RCF(fBuL3Squ^Xnl6j& znVD@j6~{LZh)7;z1d#XtvmbQ;F@2(b<+aQ8pad&;xt`LgwqTvz9(IVerd5VmfcpceEzCU^TG?G>d zDncnx6)e*1lu3C#uYPh5vlP<+V>P47jtIyn5+{e7Du3MYGXMrL)|oSOhdp8 z!G{=EWB|Vp_a2_eXnPl6*dnDjMEnjbmT$j%_E^+lYL`_Ug1doF}2Y%ZN09|^LpfRBjk6}nIJY+VtMQx@G;ftr_L>B^#!UDv; z^QVSfGW6_Uv*k~;%EP@pCUma2cQ0}p~TfHfL0$Xeln115QQB9JH%Y1f|% zzE~z>M8GhBHH6ERkqaMXRW&0q9`f`d{|7F^+9pmTdARQVYv?4Ba@%9JE$yVy;BI6P zfv>nz+H&3%U{w9%$sWo2a>q(N;b$u4F(0&UzK+@R`E`uoimvUf!qAX1pstbD)}J=D z>M;>!V5%h_{*+1h(bC5I}1Cy-$LUEv+`rv55TDmytsjHa58{O)wWs-dml=(kV24rvf~ZG@!*a*bP9| z#zT)P1B{CK$}r(Lh#gg>WOdDLs z+eXj0yQw*f5?1%6a}--cxsQD42L4u9=Yd@}(CL z3Za;9CJC;1aT%feVH%Yu%Xf-bYJ=2xQY7bnbOkzAu7?W^=e2p zd+-wf8O?KLiiFgnPuicQ%+{Yng=mC4B4C43Bgq_J>uaf$HO$>fAZc0=11svbhG6cRw1LEg**o)myXg01w5UD(n9jr#?N zlaLpQ^v2FcxV+ij5AKsSDrA!myu)bK>f18&l zW8l5=uDrC`gUQK^j*$_)9hVKbc0!Xr=H4XVengQDr}}KP=V|A2M@HzE-iu);a2d!j z2WnWx1f=TLNi>q|&WdC7K(^q!QQ|60E1rl_n~@X^ccU;2)zxLqF0%9sD+~i*<7Rdp z4+RUkENk;JkTb}BK>_3yhAKN=p_yft7GRPf{+)!QIq^FlY`L!pvQg{K9Xofbp;8ev zYd=59JmTDojOwZpP{qAIB`3e_qayLJCr9a9i}G#j!e`0koE?rp_g7`k|S z{;zdYT#LCx4aknr(L`cpF$Pm|G_uAr;;{{tP#UA)dN%&71T17*qNuN#1Ky4+T=w_C zrR$vMn;W3yu&rcLZ1(gxu1eC0VTHkIlsfo|C3AawJL0&tf|k9a!Q{li(4h?f-9Asw zJq4?aVC#7~gDE7DB3q*8I~LoJJ5l%K8QY5UUp2{kDlwz?Rw}TkTk`5Nps`&2m^%s( z(4u=pL335}=O}o@ zKJ&{Y6aEfHuhJbd3bIq<``N40%b~`i+>hBMw|H9aT}WC7T*4J5T}d8DR_zXPzv%{{6;x~ z=5rwc(jaddg_`g1@G!3vi9q)ZF(YNcx6)-oeOrGKk@MKjnwl{p+PdONcz?Dh+qcZb zboNo51cWO9!&^dUd35DD4YTYNz=S0q4Z}(A*$UC@Y>3%|2Rv9r=YW|3RT=tl+5NES z=$kp!;1r!P1K1WG=l17U1Y0qaz&2DmLM|*eGY_)|Vio{?L6m*C)<~F00?1d7B_P^) z2*6bU&4~`dh&;tOXS4kjAVI8kSlXBv75DByk%XZVh6bzm5GIo8jVx)48aE(yM<7EUVqx z9W7|?U%T-}KC{%(mZmS<((_Bqcve)7l?%$SrT)f^8*Z()_F&m;_m0`OcmSc`;_+<0 zmzmi#q|kmuz3&ABseZC24-Jfs9o)r(HPKj@Wxu@_age#$5wE#?M7@ysY?6p_t}=zz zXDzUs*wE+&L!c(-HZp$S=8lU148$UwE|Q7um&)zit%ouA6IiNDw?vYnxY-xF{efBs zxrlbs1T1{=0ROqlG{iBmi^yfH&R47jPQ?I1_E_x?w7th*mqUnh_5f!kWRK*Ms{g*M 
z@|Nq@ZI(-a_VsH;+tG=^=YSY%bkqhwT>u#@w$gnzWhzM>zxn#(W#qtCZ=prFz70;S zstBksHeW~pHuI65AJs--(U7J02WS*NY+!^dLtiWe1O$L99&^JGJX1?bHa2^uZk`1{ zWghIoqTdIK9v9;uSMF)!Lrm8uw_Nzm=T~h|gNSt$Yz>PhItjmP++l&RYWh2NNgSdx zZfR}J&dg*ts^aJ2c>n=HHwsD|G8PQmx{eGkQ52gV;}xx)FYchP7QG~SMe8qYN~Lti2l#|u&@Fyy@2{v3W~(llSC2C;}!F%x5lMyYo(z##BU`7Je~YsPkkEn zwq00G^^vqtXq07haYlQ-q6qx*g)tB)*qHP@+OPdR9bWHuyeGBuW6vaNq1|7??f#C* ziRe`ZvqjotC;xU54yWTn?R@6#A4#KGohRkaS@eOB<_Owc0;XW3`9EV*IJNcOG8CKq8j}%%NjLz_wUlfoO{`XC~IeD0@b7q zlN&c)EOZ|n75c?*a-BFguXk2~b$%Dmb&JS?Q*?zn8ye6ORP=FNF6 z!(SF=e@3s8YdAs-02S!%dereZ@ADM+o?QVAo&sEs&+88jRSHv0)!c{klgF$6m<3Ey zM;!Y7PSt8VKclJZGv)vL?JCsnFVuDpCG%NhaZfCvFVIeUFdo&PxZ_i$NJ_v`y3S(NLzS2q5?CJ^w!gjEON-*YaYEIz2X1tUdew~*cl}9O zh70Gbe_5UUF1O83>5Bg6nM>pC_Xlwyrb$VGQW-f12heuT?v#Ui-FKh3pJ$?Y>ct(u z?Ts7CzBm{#@vAee6*<|d*EL$Xfbn0ayZBF#@I2XdJb6d&MCoo`aqGCGr0C`36_)%0 zS6qVsZcFb?xj_P2V_M-CT)L?^kq3>VB7Qe z-_4%v`~qiO%0E~9nVbEpgvaUSt6>ef^f$*LlR4_=-SOuZVBxrZd_0n;eZn`@E7#E# zhs|JFzxQT2EPJZq&uJETGJwbTooP8g6e%Amvyv9sr;#Uy7lXEaw(CJc@n|63)hbmfw*XnnhPc~nr8ren?DRd=Z%3>Pn=!gO@pF^L#fw8HkDkxG7@&=& zcwO<0&Ev0g7?g!xX$!F+8GgtBXv{oNRBZg-&8CSKFBpzR*n--*{fhnMhT36$OQfvX z;ml>nMf3%m)`lh;iQ`|nn(G=!bAM^)*O7~m?74hy2=t6HLCH-uwN0KZ>xR6n1{cd^ zj|BC|nWQzplhc4Ysw=mshppnz?O#Ba5vX_$kYmmpiyXgRo#yJ{5i6s1#FbsYV=Rj$ zOM@pjDlTqMIe$od^4oEg90#=_7H<+wosZdLb8^`0?=g2VNuKjZ4X^)kpVXl#WZig4 zY)Jh#WN%!IN;SE;+caKB$@hlM@?fnIR#!H*tJhKh=Kj~MVdvh+CYMVWE8t+?^(har6ltSwE505dn{rxE?5;%;Hs64Gk zGyAk?1?Ijc?_OiQ=Qo8KDWtVJ@Q)&2m$K>^Jf7ur#X;?k3E4bnrw-g)+**Q>>3=kZ z9+CPIb;xzHMo6y5?=+R{e^5SGS()|qs|O?6LdqjF<$CC%*&+L-CGr*}ze|qOP5VOt zByRmxn+?N*oCnDgeC6M_!-N-PQ10;VgE;*;S5AO5-Y$5jeTRlYo08^cD0xEC*Td4# z`b=FD0JuUNhlko#TJ3M1KZiQ;9?Em~JMqS6bnD1>hM0yy-^=9$PKo#w-$I+6Y->va!-jT~Ql^vBC%+|kp~g=%Nz$ro2521? z-BNESDjuAd%4%x3b$q(L$A++a96j{lyQbvJ%)jN>gG%+Qj+Nq;``2$3h9<>cPw>R0 z7D|#*?!QQC-&`7`I$bVFh%s~bgKHUG7=lz#Ig&(_`{no48^5cx5*tB)_Sd=vNm8wo z+0DVhAUx2&8MKy>J+bP!Um+1D^-5Kx8Cg78h}F5h#P44BosYv4_V;6{7&;CMqp`MV z`zt`@3rqmN;y3nZldHBUO1e$|dRRTFI_Za}a& z8BB}BU6O{b&6-4iw|0@vYsgwvr1P!!a24iB0=tbuGCHwtnXovMaRw)i9?$ zHha)wB80;&hhE>>Zg&jh=`jgY=2^haJTfw8~DiE9XcY7lIs+t}&om>6!KCk|V(4<_st2x9mbAJs5Ik|+^KhO35$`_3r&=-Iy84br`z?s{3K^6?XI)vGY zBmNe*6O=Dv<}kUPfXi75X_HaZKz~1x0HmJ6zr%?X!-5z@77Fr<;J_zfH{8_PY8MMy zTxN$L7IgXA5LAe{eWsg2k%D4N%f4{{5J59C7~X1K*JsB8n9Hm_D&O(#c>-O8_`n7!knC{Z(>R-fe%KQ^}N9oN!91&!6EP4 z4w@mbT^*Y;&`U(!kgPic?%6g=gg5n^37}l3#G7a|iJPXhoQ$0MC|M6xj56`2l z1r3|P_i#W5m7)!jj^SKQmM#`ZPLjgekBf(gSSjYY`I&(&uaWNSTS9 zOD)i)A>gcRFo7#IS0JCHp-tfZaU2i0XWVQ3Y-=C?BEImJu>ko1`D-v}q00I%*wh&i6=0)=dC8TfZV zOky!766U$Gadm@!{;~K{rB*>8sf%o0oGvJsanrIPBvI~p7%2wr_TpKmIxW#qO#t+b z+w$Nosd*^>gZu``7?7eZ-BJ9lYcK-zK(Fe4s87fVgQz0Vk|Djo`s)aavg;&Rpo7IV z2Wml*o*PUpbQZst23R;b35qrEBw+NwkfxRN`SM0=qtEa^H4o#XrMv zwh(R%R5x=6NK_8D;8Tk}YziUXPYx<=s5EWF!bg%&=0qnChwKlulr8&H4%Ba$2LzeAEBiNzJu>v4K8TTm^xmzo6cgWm=49L!Y>ne*KM8`_x{ zCK@e^2Uta+vAt(J$r4e3+h{7z1K?PunrX#lKxoTnqGVQ4jP!vZi18;Si`1nAou0_1f3&ZJGGe{t1rQg-T#s#wZ6*@)mw!v)yN{~{ zXX?ip{J&dv{zGibJWpP7zt8{_ zUwrZ<7B|4T4(G^`IIn@!;6P=;{pF7>0}URH_$+jn7Wel)_OUyJXb#VlbtEp z7j`^qV5@uwIoZp=ZVe;XLLUfN0RcGK9x;yvofl=dKk48Gn^|kEdmh&l(4~;M;beV7 zEY!=`tfE*v&hoWD>_!!!{;(Ii{W6-~PgAqwjrJ4@Ztx3GQ@Ve5Qt`r2<55UNC#~yG zWRW65170bxPI;DexdFzJ2p9h?>~!;YScnj6~ zTh#P;lJ*DZNjr!XkdyAei}GlV`hVqt`&xj<1JP$Yiq!ypc>#RRFY6WTDWYSa)_*6E zvG+xPit?|XEp>hZ_H*DbxUCCTkHAB$X4=;8%Kv$z^+5n4`lXw6EGG7c_R@O&(P_(GrkRig9UF7MVdVB z4$py=6|8tY);|+qSEv6Jwe{#^*=9*jN+KX3SpxL}dOHx_D$yi5RYSyW^<)E(TEbrU;L;Ah;|b~G^52{M?kGe7;L3!yP)wt^#s$q~Nij5A#>$jcKF-d(7> z05SO4fK77R88D>v_DUtK1#CckKm2cXd!0`<0i<^qyqXzs^1-qkH#`QP0c*iKKhau{ zlKc!B>|7_wggv`J^1R_NS5Q6i9 
z9Z-;lc`1>367+Fcd7KPz#Sek1*P8;2fU^q0<`W273P}Lm46wkFE)HI44d4*me!e}v zJ=+K9j409|yCGs+WTXNBx(k|wIRknFOr%eB4h(N03F_X=nk04JiY`aJpdhT2pOnbqg~O3NjIJi_{m z@*#ofNZ^pwTc#{G5%zyC{hy1*1|=En6vD4WYTFLM{=ghk!TEVS7rcW$Sa>rGZs3A~ zge!x8-rmsk*T7>u`wkF@ZnMvU=}BJwIkF4@tYB1MN3DRtFyDvX0)F4aPM~V_bc0!` zKLwOVpz!K#0>;4>unS@IfVO?$_nTmMj#jzc$L1T1?cAvoC7rm_p7mrnwpx5ivQI#p;6n$ zEw?7W)$*4CbPAyBq1`gO0YAf^4+ym+z>Wp|2Mn8mRDU7RFKCDj<6~lcV)7XV|IE+B z4L(J}PP+{#Q$-)66!x=3nj@nKx{P{Smg467DvqHb4}y;*ny% z9)PTwSw8ezL!cF}ZP4HPBH=_uM{i63WwYOEI86vFfiTtNdZ4kt(qq(W47uvYV8K%? zs-%Z~KUGtMk_;k>r6eRGXpiE6?tk?^nN*1HDHf}(Gcq$p>tv6Cn;C47M7mqRZwfjG z#DpcDCzu1^a;e6)>(9GGhyF}}`ZcL{7`whY+1h9TvDj@GDVh!2X`XsAG6O((fcZAh z2nIu4hoR*4tcHS2T5V)cZ{j?|2ZevkW!7AoA%MP5kV|=O4_NyE^_uBq0gEw!GC+iM z-u{Az(m(Gm`Un`0n;@h&Y;3kbaqeWGq5)f68S6yDoaH~I&s#3WnJWb8w~CO>sJ1o0 z`>i&Nk-hyH7(3`v^r{L7e0yTzslGDUh@6d=zOY+h^4ZD%viqTdgOB!;AfbT%3(+rw zT*!8{k{ApW)?)thPiq{afSJVu3_0r%VD1AH#CT0QZGHfd-?_MPHPPA4Pz4;gF%QoP zIQuu9o`>9mZL|ou+wI&{WMpI%6e7pg$~Zv&hF_+441sE(W)9^c5E=sFIx^}~aC`r+Xr zFiQ1AQ}62KOrde;wg-UF2N05uN8)+B=Cb5=f&!8JCP3c@!zR*JSJM)HarGwn`9`Q-Y`nZ`#FFd1w%?E5?as-- zCwrLVU~r$wNsi8e6OLww!-@(NpVEA%z6sP`E`!LUbeK|)`Tm^*9o?%xa6PX>wwMet z&k~&GW<5a^>BXNpF8iJLNo!<2Kr92WcDn)^BbLs6 z?!mD_H7?PoA=WMe?*Ll;Z8&kL7bJk;C15fVQTQYG+#^-cbJ5;99!s-T976XC&NL7N zArIK9HTI!e^yA60Thss>*#@C8L?LGH1cz4<;@t)iIS677b*$r^^*(@DqliWeAgYEN z0Xs;5o0BB;7SOu^9olgA@Atnc9W<8h%q`r%Tf1XH$VWU+SHRA=QWsun?7q}F8V%NB z0|Zm;Tbq)!>w2^Va5U7El-jf)oN|;Qr4ot3YI0(t0gxc5egk;hz5+CWLrMbim!rj6 zf>+Lt^}x~lvfd{FIEr)X>*x0`W#sCKPTJF14na6g2d^6li<3=JHh_tqxGI}t$9mkS z<~P$pxTCV>gznZe61U;$c43btSP}=IbR2pFo6af~^N+uX${Kz?$3d>RZ~IbpZU_Ci7};7E7|l#K2!no-uxK&Yt> zGF9CtB$EL3a#@s~g;|Y(x!^_hx?~T|teJqNY8`g$k($1g@=t z14q9Pz)^`^87mtx8JyucY0Py8A=B#AJ~&QH`UjHa8o3pqFvNrTO<=6$^M+CmIK#Iq zYkmoUcfWSM1NM+eT%c>*fSsJIhOGB<@Z^PgU>$_b(Pu0@Z+Cva5oale@()X8Rv3RNx>q_CwbG3l74>kDJ@nr^XbTYVKFHMW(6q| zI;yP1S;5=9ieGP#sK#F){+Y2Dr*UqaSsHUbpQ$5uoLZPOJ-7j>lm`X}2M7A?1?tM{ zFqPApD`I0}=s#6RIJ9WLQn49dUj{TvR#(Rc$n#~K>;j2Za7adSa;Q;QI5?;%C}bfD z({9|zX^AZFaLPJ{J-QD}*7^3OExA>RSzh~&gg@*;^nIg@GAQl{v!dFk=HUKL) z#YYvDZvrgV4}e!jNEQwL!AT)~Ar5aZ2*}8K0QZmx<#WG21xIcQblIvVW#jjB>a}(V zBqJ=Rfg9aq$=oDfQ9W4_r!)h<*~pX0lGM%;BGg8tIWkOt;QXqn0LGq$d6a&oo!=T5 zfrFegrNaAji;IiH!(TAd_xCNhU(vH)I+EQv0~HXhZ<$wcWXM2+5?%J=ccQJ2n#C#@ zc6N58#2*v-@?O$s=I%u0Elc%9QIhpHO}4lHL8Q>1JshS>ev+YoNtcX^Ho=C(48o-m zL)-DERp`_{k|j96V`bZ7sg1=hX&Ij}Rlf zi~~0ns*Q7(jUKVzEU-dI&u zR>lsry1w%emnJ6oscYRE5fznwonr{U1!O$KHc%w8WJEZtcG$?O$mf8MLWFHH5kjW3 zF+*N%MV=sPF;~d|mZ-i3*~J|oDc3iV{`f|Ta`?CoZRG{n&gqHz>63Rxet;Md%jpuK zR+~At_EB}&_X|^6o`rJrW!__wwy8>=$%I=mdYT4&S|0}l?VeaRH=EuIM>Bn0H!@N3 z8Q}Zx?d~3?_LD^(Kodm1I2=7XvPO1mX{BxSFHB_710WuT8qq*>HZWYOC(N@h4VuW= zHZ3~Xc(-6MeOSmtmLfN>-jVr2w1&@E z_6kz^#*EZHCqU(U?Hk>mB5`m2@`Z&8F4>r$Hj}rxz;QQ)6o>C22FL5yK|(T}0&Zre zrqM~mc5Re*9)fW6HRtdx(XJ2cUhp$N3BJ<8t+uqZ05F7!pOr9tX|O~ApMwt1Z_w*V zMUChypw4rwVRJ0RFQEz$vWh=35P|Nt0@f#~;6iyNW62V$UiLen4KGuI%{p|C_m2;z z)*;}x_ug7ods7AQp2#r%?5%Rdd`?e>=E1q)GlCW->lK&+4IozlQ1?OcO*)x^54UW5 z2+EyWjnZWXZZNK!CTkwf+A7yTaJTWGrH!@cLbMA=rUG zmFXk+U}C{v%mGrW8iNl@&#?z1YJBH$7KpcG;Jb&QCWvqA3pfojUm@b{=q%0PI%_K3uV#`Vkw4AgCeyOog&<>2jQK+n_ARXO0 zk)*tiRX0>I$Dg0y6u_+i)zqYAG5uy!!qAx|a%T>EB}O`!EF7zz$ph#3Yiqdl{4DlMIw43yh5Kk$zFeSP_GRx{cHI>2`fR-)~(H zkH(MO-*8C>z+9xp90W#oK-7K=Sd^ zf_K)^((3N~;Lc0AQWtmSXbnhFt`KQzL}fP|e?)MJwU@v}&xO7-dJFlmuRn^J%?sA@r3{ zpk@287#bN-7#bVHvm#uH-p*Mzg~kJu8ic8ijd&b1ySoGCcdq=su0BX?x~(JZi?(fdQbEK=aJ!xC>`Sszp_PghAB;hX#aukLusA-> zWQ%!;fb$}WTn}U}#o7QcVo?F2wvby{L{=(@5Oab>=TuWtQd$xS1>7!y)g$ppkv8`D z*OcZQPzpuZ%*@S0D{+C}2H0?p6Q6!Wp!rl=SGUpe{D^GOTq-KL-5OGsl2@wDFE|FA!$ocZ^(zQ3=TY1ZIX^9`UxCD!2Z$em 
zfbvKJU0{3$#yDV~f}nOYe7d!IOMvwQ;9xrB6&R~nB&Fu}Sss824y+^N&cu&s%lG#lK10BylSra32AQO#ulX1^HEuT^dH&47^mF z&)h4jM@oFEHr8kpm#QA2@G6gN8yM1e9Q_G@of1 z13(Cx7=Ul9hr)uLSrK_X8+#uT<|w9u1q^CH6Keek2SisA5Afy4Gdu6zfHN@K($v(} zkyA`#TwGkM*hI`VfD)(0l;ve)z-+s^xR53fD_ni1Np&j*CY|)dQvQNazmh0N(HNdj3mT^mt zu9OUD0ISBob(?&I6FXCy%_7Q-#Ts~L%dLdzSDYl^RtSlRe1Bfu-M=BVT5U&2i1j-r zvjTz3US3{cZAh~$2LS*dkO5#f{SsEBTH5M155kCn{RjdpFDx_%}a}yI(j%KrC?WI>JQ7@bAEn+Wg<}1Hpu7aj1BuIT zruSt#giBWqkXlp%|6?o}M*aZ?LD(;1GKJ-2+fhygYiPurKl0of?c1GLq|x&7eY|XS zwYAsR*ZnNRf}+5_CzN_!iUto`BFXRh!Vh>jSFM1RiB0_c{2WjN#M>%;6rznh(MN>^ z7!+x?qF{m3>gW{*3e01zWGjdmcn+j7M8tA0RF9>Ax0#uhmB%Zq*Y@Y_%XQD`UC59m zfBh>dtZS`vct3pj08IM9>t&yD=PmSw?@{X>-)_dg-mWYc;|Ogs%I>Jf#-|ZI7v5NAc z#@Na<$G|Z9yXw&YZepxN@CT3{tjhUlf}Be}?Z9kSYSNw9lNKkhl3OZ)p~U^CO(2ihID$u>R(GU0llf4Rsa-NUs{8=mfG5-M1o8_9 zk($g0P1ul6ov&L%|06l%t_VD298?>eGbdiiPu<9VOcQoUAL9bh9v^D!_^K|%AT0`R zM2kYUI!fcth@&D0H^NK4s&A$(x=Yl#l&FUPA${Ou4!i%g6nWw{I?kj_ z|7}Wi2?G-&V>{BnIr8jv?VPO)NSP$fb&SBD_m3B=};{8c+wR&$*`MW6sPM3LwRzlPZikg%KdHKxg1%LZj8h@R)G082iY%dT4x|R_H z(6vaqDXw5j-!j45I@7!#%;WBs#}>0LNH58qPq!ExC~PCaP5Mb_e@)^VG*a>`%Fb1L zp`ngf&cX4ZtE%bOMb*>+>Zs(we#7MKZkLCLE8E-k%=ByXeZ2gVatiqGWqV64N{utz z7`$TMhh(k|SVL*1Q$wZYl<-YQK^L2N^}#9BLYKjjHvKTwho*b)5nAQwBeRH`SLRBh zso)<DM8`Y@;*d}n)v#48UfQsFrtvS-amy! z=n{H+r$ud=7So3$qL{d#YcPS-?TNR`n=M>;Xt4(6y-WmX667=OXAYCFUeAMEN}lkV zdpE<0H(>8V$rcUX69~cL5S}f6i-S?0O}*pthG&%6NvS}a{k_z|M4vMITzqCe;|LSA zEN6-Tkg3xW_6M}YPK*1Mi0j69J(tdD_N6-Zg5%I2(o@UUysnTXc;Tld_a~8i&W0AtY*r~->;FUs~aK)&DhV^RGdV;ZVHa9YUL%j!$ z=3_jldhoEWlA1>q#Wqbv1yA?J3v1N0fynqZza?!i`xgyG?-K8ioCx2{H8X82G{rkE%R0WekktEhx|yIYorrX+xGXPWBcO+q`4PWsRp8G#;|Yr=`L36 z;?afD(4U+YEV<7*yoL4hdzdIc>VG;1Dd%3r((AG0MwQ~lK2QouQ@+tdURfqFI{aFh?>dfL6DgL)%2zbYFZFCX2s#dzJrQOZE-5&PS>qQg%5js+(+VGqqrj`t=P6w^YR zVhwLUFj1rne)w)I_-WeXTH(eC$|1K#p;%#}FW|SqUViEGK366pPZxV+hh`r6$FIAs zrdoDY=!1={Hm?-Ww|d4g#+M8a?8gsqj@M zTaA2^Z+%8VrI9`Q? zV7putoT`MaP_RYo4}psp-_N@2Ky4j>xm>}SHaSD)&X%7(8`T$7`;;_F=N>hba5 zX_ijxFo(rlgWyiT>^$bc|<*_Mv69w4Y#N%jQY`VNk^+GMmQkLeQj2P}q z%u^J!>$9`?9|d(q+uzh zV&@-r#l3gk)3bu>D?#_W();y6p*5pNPYG@zTYS(VbM%@dBQHX>yW^R*|iCwz9^JQU#v1*W2Kv z-T}34Rm=Rw`N8VBl+wYMq@RCex`WtEZvz|lvFnfV3|jo07i+itgFMG^WPQhY7iOtf z+>k{+CWVLyT~*I2zf6OAKR`oJ*?%(3|NZ@K{$glT28Pkivo3>^`}Pcu_LS83Od0gk z>_fa4w~9x_tVi=ymRZk+|D1lLalJcyh;8|xpk};d5c70y(|IG+^Hu81CZChW>bqjz z`8hf{31n6-1Qzj^<9#$p&-+h0`2|1l*%ufTygRYGNapvnBsgERCh0L3#usV`>9^KT zrQW~FTU87FqIr`)o}g*%U@^p`whgCy9y;FF{lb6Kc_sDxtJK-KV9C_&?}V`^C+hv$ zwLa|LE_ewm(|oA&Cu$w}>+0R3;$6_x2GNB27C!9I4j3?KQIQCY{g?--C7%P|gl}PP zv)!rsmgP;sYcBWlt?VQGjKw{5THoKsGkz5#)A082$+yiw2RVr>(UxfnL8K`!E=6;_E6mG-5*`Ez~!g&`;7 z)U;LU3Om&;!2^ zi6;CJx(UXh8Z=u+tgz2JI!F#gk3TZZz`)}XN^_oB*!^%oR&BD(n=@5%GQstZiJ9_3 zqL#W(n{Ynk>ElE1Rf;$F{?vW*`NaQYGohAiN8%=u?bjYiY>i{v%h)W*d0~9p{xu5d}0o<)c3yVk|SnDv%HB}VYUBl(HpgI0#HaFPsVhVrP*LkeY&nh=0yDS)!&=Tz{8h-B{%Gw6iF+vE0bB;I&lpOgEPCu7{JiuNwc# z=E84Hp<5OD^ohZ*wAX#q`Kuv?Gz@uO>6&M5O)WL*=TDPOUdPR8Cev6OQmCXAJ|So= zOntVwu&l{5-#gtiyEE=Ogtx;_;(uP@QV0EGahIDSK8nDcNiK%FJd$pB62)}6k~x1X z(D~~E9ZTvmdcFoMW>D}!jhKohegXZ@@d@7c>7zHf)DN@U{ch$=@|kZ2dO+(&B}|n9 zCQiERTlU@WkA8Rka3HK_HACnhov6d<^?M)=`fmCCHICxWuc8yjjv?KFsdEf5#PMWx zugd!<{wJO!vnMX830&O`SNQEi6J<-kJ(t?C+f`c-zFe$@I<}KKI8<3{T0WaEnLVLr zrCv@}v$Ao3qLsIuKh18%W04ymSl2uX`Oz7Uj>V5tHZS0f6{;&@LDzjNkUczw6|Ofh zbzJ6_*Y0m%I~qgQ-K}>*s5P|R7o^paSiDhYPqicMrot(XlIHPNGihs_UrU*A7h6WM z&tPsi_V=Q~YIzyY$9wRY?FnJ+R$owG1FCk@)u-aQN3Dj7;_2tGuN@@bA=xgLsUjD* zs@Cj9YF+Vsv?q(FSEcCg0j$?8O+T_%Pd;{(ZmMY~oIa>hNq8md@n16=oR)ijLo@D! 
z6??V+;NDO>DC#JZH&i)BWOm|1mVvNXH^r(M8}w`$-e{C2aj17w`^cY+;$Xp6KUI+K z-YE0xlVI_MjmwYgd^6cNPk7ejd48kKtMXMlj$jHxjxX1EEa3~b#+ppTeugBqeq-qS zuiwmsAoJM`6*}Ml?`m+b73e@X09Gpb}T(Qk|cOjfm2I|(<4Kw)21>=~e9gaHg$GVFIySU~%pYb=Na7&k3;NrB zAjgm+R&kU&o{@Mg;^Unyv98_&Ct4i_Bx`myZqcQTJqnxY(h?N8-y-oC=P}JwA70%j z&J$FqJ5MOg%x1zRByp03rfI&-=5~;)c5eK6sq*%X-o_&O;kBx`!CaXANdTt;J|kCV$jf4?P$1uUXWHZ13q!0jc6a75 z(nSH;tvKgThWY%ojw*$)JY@!{7>J5%m{RJDR;ydx56ODEk^AdMY*lg2N#Q zmeqLyDa+X`b!%s!LpHMcWw`4KdwA)uMdFP;)!-CTFFDzZ*3ZOxuJ!fQTO0h-+vpyo z{;n$=;iXIsyd&RlF?e}eN%ZZR`Zr6_7gHAMxS_!p)cV#;G#Yv_wX}1@E`tc>DNgKOwOiVH$nEm!l70o3 zitlP@&bf__)4y&Zq6e`x^RqIw1u%)FF4f&q5zSFrWPW$m*CcUm*O(d=G8#ota#^4_ zxh_|ZcdCT{w9C^;6!6KtiwLzTV2vav7p3)$-b`qfedq2|o<$Ddg zs;(*P$j(g6nuI6%skFs+>$h(WdcMQXLj}qc4G}Y9+x0%@X)ynEA>`%B&r&MSTm&2G; zIH`U{@wsu9INI@uifhFv1w1T%6!-&Mo))GjU-g97S}n%oEgiowjs4Om59DIw#=_`$ zqpQkF#)@}PK)Z~U?vFd<_w^Jt6aONJ-795hNgkL;`-ZhtMY&A$Ni&C>qnvD_fw@W~ zCI>`=q}FJC7}s&6=$_d$Ym;RYNs2KGd~!(Sz|`ufxP2V+#wPrCL+iT2$SJ!Zz((i-?%wb>jf zykDF#4{f1j_ube9K0(7cD^Vi+a~mZP z&dOzSB}e!?oKM)k)n)2t`(dPK!hN(qeEEY@!WgQD=Tn4sd}Akck?M$eNz|-Ldw9La zk?4#>Od6s=L>Tgm-*nny}QS9{f|n`Kzb*h`=K;DZ0DY*bV}q0p*Ucc{s8` zy5#wZJ_*wZGhc)I7s)a;xvvg%`>ydE1UOf8#~gjk&w!f;$sq{lP?rZTa;QD-%u{O@QO?^^|X~gc}q`9k#LflC?p!> zk4eZ)T~PMB2D=#5_3xjPowarg1~BZH?76|A_wYW&QF|gD;i9A8G@U+Ki)d(LU*z~r zhTw2l0yo7OOK!mB!cr&pUV1I;9@C`!WFR`2;R`4f&1TmilIpIQgt|7!(8RUKaR~K= zEoI2IWO_ltp1G45m9d`v+0Koe=+2p2tK4*$rr76>$U7c~doXrM=wopl>gBQP(&J|>o*|PT4tD2a4>2N+8Wb4G z6`1YbZN*&ONWVE*5?zrrztT36l1F=3rll27g9}LS*Y_ubystVsE4Cd&RIcH++Q$5c zy!;QooN+BF2w(_|vbuUc71vvfFCw0^7Z_F5`f1vQf3bkDHcc3RcjeKU8fX~q8_eJZ z-ov4n-Nq?B4Vy=@W?!%3cT?S=t{?nan)W-WLyz{hJd?2l++w>k>|}pjzJ5(OZtE=x z_gdG{r3;K(t~f4Uqb;my#C?T#W4G3ZrW37mUgNjAI=5*6Iz;a!BGEAH z<*;$O_3Uj?keJ=sbWOH`V@GPkU#0w8JH^^^td6wyU~4 zzrL3;P`_=Bmpn|qzc@OIlS+~Fb9rj?Bb8`L&?bu0GQP608p_ZzRF=T=R;%2w3oOZEW%9@%2U0#-OHGvmLCyulI8P3XN?k}8(*tcPB2KW}Frk$eL}g%UN# zIcfP=p zddgpH_>R@%uf`vV(fRg0ZceW>ow)u>i6%Yf?1<2WX2h4woL7U%OPcJa1xI@G?0PSs zD)l1SyzA@0ZUK#*K3whHe!Z8u*ZQw(V=T4d8$7k~s9T)90w4h%@9IYL*f>p(qq9Qb zrK__l{a^*e4J)h`&z+$C(ef*EtJhz~m7Ymo>_3U*+N)kXiayF1JC_kDL05(8ig4M{ zrs5e+RfFIc%$g5SO*D*2Kwha}6L-$+8f7>T#b!)_)_;Ek#(d#XCm zow0P8g6OTUFL@)?vq&=j;_B(w+``^2=F>dq83PWdP_3=$N56ZwQ8sPs4xs3CE@Xch zG{yFuSkqvAt(V9QMVMOo6*6w1*61+C^gjW05%`e5*RMZeyPo5BaLYIcD|1sI6~T;iNlmRiq`w+0`IERH--~})HV8bDEO1Ima$g! 
zb?UhGS$|cb>w(o?!^X&O3{GuT7s*pQpN@+*c9DkGD%~Ruu2Kc@+3@7N0c3yFU>CE4 z4}0`%>I|D~ET_P^qv)9IC;8GtdQwn`U_r`4yRCznF(rinZvcB7OLF#2_ZZ3%D&7x-QV!Tc$BC^_}czJ%>u z4d+T>9#_ks5O^`yD-blf=RezTbvc~7mDIqWb#Y2hpO|1JqtDaCQDgI(f0&O=LVYQQ z9nVPs!GeF@MHe?_sJ_X{28k>C^GKHS{tt$M5Ds=jzY=jpM|{16maPu?4k>NFd;e2N z_wl|5*m9!!os+~h3fOB6U#>-KClqP+DR)k`Bd#{-q8Dj~i*e|pP zMShSTX*INc`sSpc)LP?Jx!2Z+R!3ax9gD+_&TvJH%cq@6JvD*Js<+_=pwdy^C`g^m z8beI>8KsXy4(epoa}-f%(%HVqvXsU3Ol}}-!jbx=ML-eKyQ17FJ89_G<}c3gp2X(m zUO<%~?`j(TQI&nQIJwd5AGPu(RgfKJ^Y7i+M$hEXo()d2oVW9L>cnw{LF!f9<&MJx z1Fgiill3g$sBz#@^we6Up;2eMDzobfZwDnf)mzd1S@_*p9!7khY5}C5?{-R$&CTPt zE`VA^>+VA()+k1?mO8fWwV5EaiXzi*wtS^G-2S|c)nQeHQ^j>Eey3xSwtRW8!X<%h z5ATfi>(I@(G^+BDo7#7mhPF}vuvg{cI2^<_?Zf@5`!bQvtx!rQd zDe9@0(j%?^HI_$Ihd*YTPm)%g&yhoPg4ql2MB)qOI2y4k3iG0oEC~mYb;93VKlGk1 zpH#TSc9JMUCQqE=!BnkFo2!4;Qq1!r{w!ispHa1;-GdOL6OMrI`C#HwpTYlF|HzK% zdC3%+VmovDU;W8ylX;eU!R+=KsxE6dK1c?C@?xtW?Hn1rE%(f4`Kb4mdu^jgIXQ4$ zI71Q(++6&Gslf$4B0x@bmm337!?1qKg{^MJKq$16Cz2}m)o#c{3kcj+^vu-P>zeEr z!Tfm_BvDbvaExUyfJQP-c9AYryHz2$q8q#duP8@k!-(IbB0ERc1#=D;;{Mxpd@;>` zf5tzU!4D#fx@(Mw_jyvj6QWKNFrWW8P;lL{6+jt-RyX5#tZ-1w_PyDXYSWgTrXuQe zlJi?r$>J5ml*W?nvALU00bEb%P-ReN>e0?xM+lK-JYJVJ0b?aT(DcU1rqUA-gp1dM z7bqF`R%MQA+VrTmEGIe%noQ`NEb{nd=^5BKAWsf7A($sB>8^1Sm#84& z^F?2qcQ_k+o4D(dgfwM=u>d1^{~5ix#`y8EA7@r@e=D5Mc7AWQ+zE63_Csdpap{O0 z_VYR-@&e1fuR<>Ws;-Ut)D@_uW|FL=>6w{V=v)E)C8D!F^ zvdabT#t*9059szHHK}3s2qy4WSG#cgJr28JLYg#plc0dgCjsKtj`wX?kVn+nJ8V;D z&0v`Cb``6>G2A>2`$H7r@`}u6gr+f`r_%AwP9AKY5#HZ#g?2vr>A613OqGgoiCdXo zgv!go$V@fh{@-yM|4wYZF^_S_H^~UHGx|1=9=5`T>a2&cPl&GZoj3B|HsbEYjyqDn z-lcz5c*%;u`nPSbdF_s#Fn&b+bYy^`FTvkuWp-fKD8>I}v(~{)!(jpl@_b~?3+3pn*K#PFY>qF+Z4%JD4bzd7>&N;<;Y#<&s_$0V_!QlQ?GFS zEw;I3g5JmSZ}VUj<*5FC3p5D?#UWP0IbPDU_uhY>l;DpgeeA{m-rGJn<>kvxWfk>Z zH-y-V>^MKq%YpkZZxH$K8wvg)va=UI$bK*lh&JRFQ=IYnk1P3%TDY4y1b^C&bXlbM zz+XXc@G{W1aTc>`l;W%&{QlJa7qvx}_>MDbUJf#fq%|GC;*hHZzX8qNyLqYb@e^W2 zCJDE-bhxGI#AsG3Hh-kg^jr}xLc*HpF|-Lh>hH8_z~K<%I4JF1PydSUbuU(?W<-Pv ztbvr_bDS}iBGNN&pL%i{C05(&_(mIWDy3Y<5=w|lXzMtQrITnNB`b#Sjp-+3%q~_J z+IfoLEGD0=ziGwdzJUnp?WXgFR2jTYr}F>fDl>C0Y&IF~jX(BED8l_?pcKbXQ~F|G7Ix!+v^qVd)=_9M{E3DpQ1G<9wy#S)*_1so=v?wJKMOPCyZ@H$1Wo-rrB^D+j(!P$9HSEi|ipY zupVES?yn?u{X?_F>J%WCeUAe$RQ7gHzi3g6b`plj-0>CBv2!le3_T9kP6Fowp@03`$!jp?m6M zxplfj_|fMo2V|FNibfSb7P(QdQ6IPSNF24Cp&*jo3p^vMV%|aMOik^cgH)1!y)`wV zxy08shn)(Qki+Y*3hh`-L2suMGG#m%ZqD)e3g-`-M0aPKc8E;j`F(PSdRn_mLq|;*inXdZ4 zrI=bdyOOUan*`%ZhY-KXdwXNbV&Q6yT0Bkgpsh_FaaUMfR;@X77lWQ|Jv!w z6VASOxD3(>lDfl7{H#OJg!z~^r5_DjA*%z!SKy+XkIt$>c~6hc0Ue&~v|4=gqqpAc zZ#s}mmP}w2FKDqYlrF`qgZCjt1~&(T%@*u`0aQy7&dQG(gSZdIqcR~%^RLUOf$2rB zb)+8nRuZ%3b6F!8P?WI1hv~)b_c}OUq-dAV)%Y%-*TKSiLEU3hn_FV6taFz+ca}a`->S>9LS9lm4&8=_!f}|6hmcv2n7J zGAYUE*qV~Eu!7(z8(hu~mLTK=52tbu?0`4r-t%-s zoTts%g9@4KWU%1xbFr(P1eO3px@+lqGY9gM{Q-;-0MWq^^Nb9k$!=S*h|2H4{lL{3 zr{l4z_|4-Ck~?EiDgmb%jTSB!O;X^W5pfc#&3YuG$ELhntKNuHi|7IfV^efvlX z=4j=`4vxpBW7}RwQyT4svy}TOoYy^xH%F5l=fGI(g__zq} z?@#x>fV0P9x`a&Jm(V}i$ztZQDeHX!c7gy$GSIPuih*@+y`ON9b>l|2nhfT^*2XfJjgTgm2x1y8SoV+R^6M!uE+6h zX_)_QFT=_2jPOkc*CcqJp}OP09h_TFFiP!F`@@NJ{M``?|8D`EfR^NBvWDSFR(BnF zC}eEk&FO#?QqFYq*k8-mHB$an-f)~~S;>3O$E$%%bNVWYwWqj$f9n~)_IJ%XGIIZW zi7;M-RDE1p&i&Y{F&YTDdX=9!RsGYz$5%%iec<_U^Xu1|rfpj0;`Hp}pv1I6`lno6 zh`2FO*Za2}zs8z8AL{K*pUdym(+QAx+^&%@saGEYXx3B+%ZDeSkuN3N`sX;~A430R zvV0H=b2yGuv*3XYU^>S$8V&=?JH(-H4@ZLc;)__8Aw7PZ301z#p6w2!@jtW9{;c0# zot!Y*S@b7y6~~(LY5^jwZ;r79dQw6Z19P(K2h+0nN*fi~DPWG7E_MtgJR^2WoA}J@ z*m$`bT)#VqtAvPevgokt(gn(UmBaA|LML|2XfY@cQ>Z+5H40D=^>lN%W6bOm^@Sa0 z!Jvs8Lc7yF-yd$boN+2%r4fJnQEA&nw;z&+P%p(J{&P&0Sip_@fc`xNAYXV5GEO9T 
zK}g#Gt_T2Wl-Z>LP|5O$5`^{x-bvrMP&P*ZV;k?s{nRCEu~=Ut)~mK_Hd~R01zHH0 zGnY0r#>~Aisb$wweEss&wdA)oJDJKXu|3vB z{vUcL3?EF>*mQoS#lozc7c?gI57&S{@ zX8QIEnV0P6g>*wFpwn!g*C2^F1O%LI5XHC&aPns3zp@cjskbB$cxkDNR^(_GM{6$t zlx4*Uum@FajIOXPP1Rp4x?*4%d`d+_cm}8o0GdH_`!I+zlI8xkjpSZl)&`|Do?joK zPV}Q6GG4kMSQ=j@|A5sDQbfpJw3%1mja$`1 zk4<1SA#%*IrzATg#drZwH>Odbmq_3i^ZOf+^a(6l;scA-r;-nel7Zg>JC3#JQ$gCZ zx_2q+s|m#wDfNJPzprP-UM%S%%PeL7ouI|yubs9_t%~< zh)H$c#8b@lH;;S)$ZJGV&%kKx4B6B!=q9kof-pdF)<2Xsj)0vlNK!lynT$6 zvDDw)SWhHf0dmV^0l+@_HpnSg>^;D88_SkiTL($95hJUKxGV^dV^`%D6*Atk=)c?| z^fy4>#+UdiL%~yo7#x&`&036L%lK`Wyb#POY;sqzUrGu9eSZkJmGIVfHFHA%sm#k) z1OEqWUl|v5*F~#fA=2H_pn!A>lG5EN-6|y^p>)>((%mH>pdg|2Al)Jc&5%+Gh;a8j z&->o{yYcaUGhpT)=j^lh+H0?MT)fE#Hs-6~uR$p3rFT_U5RZ~S!b9ZV1X_9OrtimOI>Ac!!z6P17I zqkSIZB(nh?aG}Fa4pA&Zgeik7Fz9^v;xcAGi7r9zol=uSMjZkftaBzK2KqHHX;C3# zI=?HF9FuqfS{@;M;tjsD3W@L4^9i5aiM}t5Qo?0bRCAGHg*hVy@|%s1J_d!1%YV@3 zlJ#6&(s2+|T;-c+%khCdb=PRE*(!UUnp+8uuP{k;7t_02C(PNHG)TH%v@tvQUdKSsml|k#2wY|Ta(#P!8~f#u6KbE7DjWGSHSSklqFYv62zP(Hr8=cg z>I1oyd}f#M2K4U@bCSI5q@IL!K9z48;2gYq^ZpS^Z@yDn^_{8q=xRP^*-Y>R7!zDoEfV(@Gt;(446@E=A01KQ2=# zqovxhZAns$nA1+Nj5^_C3h|atuE50}b8D~d7cosZel2dbkVdN-e?|RWRutc{5{F#B zUkLAA!%Fjr4vmYPWXvlk;B~=HiFtE)3v>->Mb=QmWtD?{<>Sw^NbDD#zcf`Bc>WVM zFHfiv_0o=~cFkcTZJjYP1aB3eym|h6XO=+)rVr3xzGlFjCL*;vQYV%n659T~-SX@W^xU7zZg zhQYh1^uH*c#|iZEP8?Z1M>*XbV`?&s>E(+IWfE+|vnhIUf6@{E5L2lge@NUZAd>QS z<&DUj-w}e*aLyEMFT*&I_kLpn*5yL8%xM}?|7DyZDMVI36^fr}V&R5S7Nx={GKutN z{CT%Yo-Qa=A(1fBHvXZGeD9V*4~kP{TUj-&SIqy^{>X*e`D}C=%Z&Yhd{it;&vG*)RmQ!PGK(_uhr81b!|LLDA@bQ$tZ*}JTu14c)$-p6YpJl_ z;Ljiu#cUghb-)k)->M{jS+6S>woYof|&} zDI$&w**$bF<8&A5JCC!;@WRd&8W0x4*K$XVzFdwxfbS* zyC*>pjn|kVO8-D^Bm0Oj!7R+m`QGZ?_8@GrFpyr{C0L0Y^%v7QNym2gek~+Hw6XC5EE0{X&)i-L7DZ#cZ zw&T`#i>%?LTN$k{!pv>f1nh!bK3;KXtFwJkmuqmV6aUguSg1P3qV7F>J&sZAGWFyt z(~bat`lR4JAkR(FJBPd1k#ljwSIN`$UxYopfg%Vda?WLfLYITfYoXsO)l`MXl!;$_(#>mTx zg|nkmks~{kMpJ_XRO50DkJUD!MNU0#VM~kBU@fhK{TMz+U8>BeTNnO=nii0zt4f#I zFYlS1y*r?s-icD9>%8@>9Bn8Q+1&V#cu&Ae0! 
zA=gYo_fJeinC62i<||vvu|nV0hbzhF_t209NyW*0o!Vmz-_PN4ZUzsgv*vgQ z)!_6)7|6UuL|wcSd${7>S9D$l2X z=rG2wOIdEV$KAjoTbaa9FhC(-mO&mkE}iTD%dpLgfr*d|`-8W(yRYZiTdUJ&SemLo zK&EKIZ}CZCrv&4Tuml|98{4>dLqsw_6M|Epj-!ZiEXtQRCN~J@~ z@SR@gC+`g*CeZoHPe2;CiG@Zc`RseL+1gK{$?mbNo!&Bee1h~lJ*R~qI8#!H^b1%_ zTnhBPBFiffmRO3dFxX-z0h#x7_7sD=*7CQ#yD1O8CG(%qi`WR{UH#ef3!^M6`|uY- zu|pSCoB``w4g)+x))vW_z@NlA5yZ}`%;ZtGV|aXHY3dkyg0CxYVUZGkRAI{aq5D?h z(hwm6|2jlcEE;^tl-aZ7M;l>LHoOu}+5IXQ<1XwYl{2irGpa+<{6VCWGidc>@)|jB&s()KAB+S<3mx}Q&Pr+OLpnI zU9PX@4Wr~L+IYi1f7L(GRPDzjyosrTDUVoQuRAnkEc_Of7zzCJOIBHsHquBfGE1Wm zE1bT3lPiV8sN0lNb~bwZkhDgoBKFpz-lvPi-SzuT$_h7rw*4f~IDqH1Oj+*DDZd@` z{m1<6V#^73YYKIgS|3_iqx|0Lg0VtV(#12}A8rK)x86o{_R#$v})miG-&ezP@x%!k7 zlzO01dHmecc5;OCAB?UCqs`qCiVtM z6(?Y^4Gjr}xf~ne`_`1^_*RTxTM+FQZ7!IM$}XbgXp>V2=6(P=L$|*Q^`cQoc3E@X z7)pUCzB7?$TL!~cj;q7j??H=Qgj!ix*NJyOJaRc+)5$ zAS9%f*C){Vr|gl(I9u zA2siH3O2aU0fTwcLRLom=liN*vzrYOsW94HaVcJMng_3X-gcnKsqk`!wMNfed(ohd zchTMDIGMP;HaX8vmf0qnj-6^1mG?c?HQX5zPqRhbS-Mnj*U(ySmUOS&Fl=86rr1h%N)P8 zj7YQAUT`hJ+)FNJ7~ydrxmxT22Bz2S$iQt8^cDBrwBm_&p1{33l)uG+dw1YiDQX97 zrQwO4uVZi1oW3TWV<&>v^qI)6>tPRcXW6 zO=}k~!fyh7p8KF;nd`k6>E@PFj%ZAu48bBgWMz;oA^BUSqa<5qDwvU{)AiRytx-ti zs7Wex%X-$UoHs_BuZJk^$I; zFC!5mg*X%w`1Ge)b~$SzJ6;(UsAP6+a~d}I9>eh zq>yG@YRP~sa_^+zEP2*@#!b$!Wj)7w%|sPhJRejteJSc^24F(BE1yFtA}4gvNpchn z9tc!6%lm!A#{gFH*qp{eDDQGRPO`;0XrQe+c&+k~aUWg70Se$WdkY>jq6E)?Cvu>{G#}Sxrgq5jSGLq|7=*~B5(~F&` zLt|^DNUc%3E?u&`+BMqe*oW{2QqhQl)u&;y!(GTn&^~jn-}pU(k8l!q$z$;>W41%$ z!+{F7DIuLF+WeSru>aBy&4`G|N8$tq%e01soq1KAmhl4Z)l1PIndeV`K261JEZS%R zrJ~&`_MpG|2V;u36$7xy#8b8M=ZfIYx zh6;AxpG}$n)lHebaFkDtb>;6nZEDew91sI#WqMu~)Wf*M^MxB#I{>?4f1g5<-yg(X z;MoT%r6~wF+U)4a$W2tbo%tn7|uOoUrV^(ul(bPt7IOr-icLk^W^BPv` z3#Yj&Q&)7ISRIB{GvAQ#C2z?jh&>ruJ6zZZ*K_5FB>zlRgj{ME$WOq}{X z98FtNL9xR$o23c;{ZM8n5IGyA#YwoY#;g|Fe>IV#O%CA~Y5N6D*hQwWe^&TR6e{Jb6_-{5T_^l3^Ft7?&|N5gdPoWD6B{; zvAWj-9;6djvHBUShnklhrCyWgKvjiI5_Zjw z@o~sd!#zL>3}owyt7{GYtku+??LAhcUCqPdqZ6st`j8~P*G7avf$YBeBh8> z)zkqSY$(J-PPVZCr)sgJAhMwMO%d+h!w{< zK$?}&liR=hHPWuF`tC#4QYV&xK*)gxDdS3OUcCsCV5>nKA!C?aZRL7A>j&R^)Y}fD zzSb_^q~0;2nnv2xsO!l&j7d|U?x>Bb1_B0;0iXZsK-+nT9 znfBB1C(5rYwZgeN?;b+#V6DM^GL+3cR(t<`q}x&yYFS<+qY$0RSAQ`7J7w;E)^4o7M%&JorIeQo$ny|5vzyW!uvQD{M1=T(W@LJ z<@oZ-yBG^3#(sif1Z*@oqK4S4R|*=qdsB4bFXQUslJ%$k)TUZ>ph$9@zWlC z!g8iFIc- zHgtuh_e~lNMV87(d1pd}U{b1(eJORMqPvOj`zB} z1LG&gN%qR2iucPUBbh?#rf>8--TE9bOlbY|G}>-ps%;$HN20p*J;k4I6k_tPwVdjI z0|(%Ixu0ht?80)LwF;X*J&nmYjfh*{{nU9%avjVQex&s^s^ora+}SPQ%Ao(a|2F&HVuc%Pt zF+Dqn;0YRwz5II}mW7^7dZ|o&o#MZwp?no zsaF0tsKm`W$H>_-dy`8W1D$-WJWImW`Sjwb)Ppzso?TJ-FbG#JJT*NWAdxHesv`8x z6m+?%i8}IMk2v~_d&Mx~D!G7TGRt))Rfiu^gjVItKVGd?{nqmwlQv8jUbdk(iv7To ziX%f`io;fSYTaAIE-L5#b}Kt|{O!5|dKn_989kR!1Iygze82Oc%6h3eOse~`X8Rye+|`nY%Awr z7<#yn#H=h_5)*prNj9lV{~;I8JQuE|WiE+_f^Df&sBzbOxuF9-xuPRF8xb0`4irAzod+<*624?m``RIC@ zp!D_X6pLtHr_VtbT02~&m5nze3J_TbY%)If8{>>-YG@MOz{HOB=UfN?8px6hq+ zes!FFe)}Z1ij}k<*wDEB0&h}@sToU{jiT5suzJSM2BXx$n?C@s6ZsfYgArfnCg(`d%=`Eru0@}}d zeKdhsP6HqR4F}e2G|T4DF_?IT^a(X0nv+j031lZ*#}j?J|+5ZA8A+?kPE6xX>?qD6P(lR+rnnWucE&t`$= zT%>*14~)-r1E022)mXT7l^UN`O??gRzbj|jfRAg#;3;L}7M#p4;P>Oh<(@atYn49y z_=tPy4)2NLq{`VjY30AGCF`g6=u8Z?h*lr?RXl4c(0gr0vqLbTM3dTHGUl@Qxp26P zfwY?QNnKwX#d14c?t^(+QIE2$P&NR9#-fzY@i3X=h>SPvqX&B*-&gDy)IRj-1~6RZ zJudC-lk%+lLRfn3*Jm=m#qY*NgshM?f4MNqZ@Bny$VkS*&>5`i9`Rg295Sm`%?HsO zJ4(*GIp^ZPfJvmpwyZ(HRu@V~KNZ3!sU9Ko0-KBrpVw@h_e&vrtf!k?xto}x5Ls>& z_9mBby1hxmPKk@(VCeH-TkP&i2_JkbqixV9#_@-jy=r>Fo8DD7r!RffP`luA!QUdB zaWu3&+r+v>`4L>A$lKqr;ppa*S)bigPNQ(i_}P-M$i%@%al^byL$p8(RhICRH}0I_ 
zTuyKeDljVNC-k~}k$;SX+#F%Ai4@(fP|-j}Bd{y86;wOxLFCr3o0`8+Av^Yu2$z-3!M1}YZW_pD)Z!0QYx!$03y z`x2p4S`X0!4f^S%{1xgYMzcnoS!2tw?%1&glj$L_j+mU^Q8$s zq%Xd2&om-!)0;4R`~*l}kwdSs6Y1!iqm+bgo!J4gV}AXKQ+d0F)QJ+X$nmCa>rFn_ z#qQeA_`iJ6hCURA@41V*$hBEg-XNfkv*HXGm=WckEDpG%bI?4|w0!|xfW{7a_XPLn zO1bvKUz_vnyOkneo>@C$7Lr=5kLJ($D+b*AbF`@+(X@}18JWBa^su*@u0kIs?E{*={jf-HMy`F=fVLv9^%;=@YgrJ z9A0^!|9nr^({>xlsXnfib1Z9t<@{|;O|fQhGHm^%Upy*)SAQFQfr0RpK)HTCi@Z~< za4t+;OmAlD$tXwr>Bik5qfdYQu{W%V-An8wlrr^BZB>!0sXSrvb%N9ql)|#*2znzTJ#xZ^m5W} z&A*nl&g|!)HRpDCmiXw-Fl#y5Tr8 z3tUV#7Ms$26!R3864KpvAenuh=A1R07!g&VPG13Um8`%bXpxDj>vEX1tlY*KMc|V! zz34|S=*((Nck2h#0S)eK&wvI+3%j5&Y$@|)Z&#IBn-SFq3pt)lSRw%%<)z%jbN%;T zG_#x?j7Yw2yV|U@CVBh=bCB{G;U>73a}7V5uvQCJ4Tkg)k1_ruc-N6roZ@y)I6b1R z<6hR5&+!&Et9|rRb{A~n`Uw@Gzf*_LZqb5ui_Gqu-@%Y2#izkw2MLl!1V9fX@BMTK z;sXu~|MN2AO~=>qte6cOL26hL!4k^;9faQ#m%zI1@mxb>Fc2?8aAX>jF)}HdY9&24 zhzB^G#WT_iJa}(QB)V8IUOL5yt2;AleJQ_E^vtI>#4;A3Xd9l|b#=}sEZ1}_BLJTQ zLf(UspzY$Bygzxu5Wwj#>Cq^16j#XIwm6H{TT}O+D%*Z668v$E>3USITiaeQ72#|j z!^VbJ(NueJ!~wX&RhkzHYJAN?$3;<3<`x!dD;z=`ZLT?~oZ+}x9V?I?mcg8nqblXK zE{kqjEbcwL+jj+bQwYsakSAXPIRArlY=@& zB)0*H2m5upfaCnZA7BoYI%~KEQSns#%AXffUGgLa{yW0u@vr`26y@H3o*LZ!kjS8# zmY|zsDJHMy9C4lFR@l|M0BX|Z=klZ(NMqdI0!PuZx3Osw*5Z4BBY6XDl-ynZcYRIz zzb_-D332QFK9}Xx%lZoY>@_v2rVBQ8HL7I4jKNtFB43C^lVbi2XQM@R_4Gzt(IMJrbm7p(jTem4f zFsEs*#;8iKOh|@RL~kxb>3fAUw)UR;?u^lYp88-Y#M{(qWF)r~>n7&{?BdZ})ON~= z;*)pWUetW4eL418EgDaR$hFBP%F=i{H{S(boP(d;$|d7)pGhQu&>PveAHCyFV|p1y z%WHijOyPun1WhHp)bxsbxBbs!+uka~UD@L8qE}``4qu|V&WpC^0*68!E%|B?0{U1| zPV_dfWb;mo5W%LLvWQ!**;R(BqxB0{pmGyf_1<3wmPlj|I5>s2O`TQS>XD298B!}^ z>rKNBctaZL$KR?_SYHOl?42CuqdY$5rpg_b>By2DkvIi{X3lzJzf~BHsFGM^XEC}=sjbQpdAL2DEM$)WpK1VNg&&Tr^WCiWA*1e=0#Wl}*TaDm$oJVy~ zvudfZj${Z#FYPV$Ir(FpHsPy^J>^)lHR}EY{=`I_7EGCSNcwX)hqCm<=`qR8IDeK! zPbYMH*0bDi5XAdi&b{|Um)DcmjbG)LQnwr0t^tw3Ir+u!j$pQE2Y4W-e8`n6yLMaU zGN(~P1V`dDpzdHYIFgPgbzTCO#+%J|9sIGrfc&{W!Bf9O_pevS6raFyG z%YeweJ>QJTm5-%NdB?>0uv8^oi;Ocs<5EqhtU{rs1JNt!kpKxpAQu1{_4`M}fnh&A zu6L`Xa!dK*lzKgdJXGgW*hS>Ya)_GR+DCqbUM&JuH-0n-Y{a{!3+TrrNs1>`W`(k5 z`Y!a{kCDA{1wSKPb~Rhr6(c&FX1XBg-TAG9k#(MItncRb(}$4lb^{mmdo5GilJwsU z>$Y0NT(_Fz%j(Y{;}j$KWD6Q`EaABGtn(bRm^uVEAn8@b6)-cK zDt&rq3+Okxr2LY0>#q341GTnu7AN~FX`6r{0Qb|xjD2-5UAxTOs}A~y)ho9(4=c`; zH6GjvDJ#+^d7<_)HAx{`%O2+qZ;ZHJ0eFvaO^QV?x$)BJDJi^k+Ik8tn|k8IpFl)t z+>w6EGEaO(@aoJ91+(4pZ~0$tp;IlrZELepwP5py%a2PibG^y8$n|!BAKtBm=G)_Q zK2q>w4f03mE_ee!yq?OaJJjdiWc+)ICMQiEe<@3O%F1E;HhljVD&wbYX{`rxw^axzM zpC>-ZY`7TLTp(%frfO2AE3`0eG`+#->+f;>88OH&EE-*415I!%TrT7~( z*b~S+;WSc+dAekAb8McskNhRtzts_%`%&3Go7Ha@U#-6vY2fOzYa^cMRlL`BNcak0 zlHeK{#lC;hZXl*fG$(^CN4{B2V%Du>GcmJfH#xGTH8A+wD_zBbB^6w)?FObXw`gJ; zT*CB=ug}&JSJG)V;Iv3br;%6#r;N=Qv2u9gZZP zp1kr^7tR{@(14 zTR&y+&2$YczxZZOWmQ4?-;tYomoAOJTCyc0>e@sN_1Tl62b)9#-!gQw#V)ox{=RLk zoc}-Ob5gdKp{3*NU&D=_u*&?Mp4D5q8&hu7%G<0|3&o-%vPTVHPe-U$D{<0|8!jZX}#)vS4#1q z#u+^Hxg&`B-nJGdq`=r>Zf|(leQi?%jKE)=vSlXt&FfXt+naTPjW9rp6Gt*)F^1Z zJB1#(st)(8FULn0=*CF{$;FgeQRsgynW|M2Bq?^``0&#W8~(luERN4Q8AcaXBXm3H znKLv;sDz}<)Xw$UW%S^j$y)=~%;|;Qi}PJ4r?(Prn?thb%h{yZw4QLBhcq1hkc=-< zd!VLhOZgLe=bS%TDskJ6Ef3=)DqrA-T33{F zDf4aW5&CK>y5J@-CI5ULNrma1ft%^X_rw2rYQSI1+*p9`qJh~b*aI4xJ}PH0Y;z!L z4fooc+zY=)59Px{?k8Y}1ScA2L2P9BY(igmme;~RUrPm_;J7++zBLl?>rF18#FlV4 zfz_|(Nqu@kh46$wndvw`f+>wK z;OF=ru7Cf#<8>zKJj*?N2m~Q>EcY%Pq2+8t^aV`{P9y(WKD|YCYX1)7v8aM4P!-~z zk+~SfQOFi?kCwB^&_7LXRS}BIvp*b-{$mW!RFG%=zkhQ0pFa<;;-4Q>q1^oEj>)aY z4Or2uu9qAEH;?``*rNaUm&X48`>3CEn~|H#PkDFe|CwndM_{5mB~aXqb!9U!C3y}a z!HMiU@R5Ah#{RvY>D8u)_d6|{!@yWw%XwaeKqtZH$qG&Xc_cIR5z8_dkf;Lzf92ZI=THP)F{V<*q%I3enzFfaW&J8Q`>L|HG 
zl8^CKtBklm#8ws8f3puQpeH&Z>AVRMBREX)VDO7H`GoL%DYi_q z`FgEo4jK&k1VYfiNb#FJwx=+_s)8CF20PQ-Tyufcwj@#PxASf(uJy?)o)QcD3Lj%^ zE6QH~D4?bNN#;H23q0#<-A(y_{1*kEt;)CG1OFLO4CTRrPZAJ;$tl%t!wE=7R-wJ; zvsSR;nybaq@#pLAe!G5eT39|h{Kq(u|7Nf50uT#~a}D(M^;cY*D50)mTmx@;a)Pn8 z=U(T2JI!mPoJQT3I~cr3+<^T9kjKh0@MVQwOdUb7#4Lb}Q^12pmy=f!Q}rI@2%|NtE6OX-DdidOE}wAsEqv?!Ih4R_8mr zk({hG^+WDZ%NzKQzoW;wz!%uf@*Mmkn@Jr^^Rw0>;CVQ7!Phi}xMY-LIlfhIhS{%3 zbFb~WP_wAbWx|Df=I!&#&fTv~-#uM`{kYg+7CpN6e7B8Nm1;OSveVA-ytkL)&Pz1# zJ_KW3Tgbq@78&J)#wRbZu;u_!dPwWD9DT2y94L2v zZh1m>!&&f4hFhWK!L-+jeeb7a@#-Vs_n;R=l4_J}u3YbX2->?Nm-(36W<3#1|SrA~^Mbu@|7D`A+@O3h9C7_qu`pZ1NF~M+Q1XP`AR#%o`>G8gnah+*C!8fa&h8p)QSz4$YL*IM zVY)$u)G&$$Eu?SoH6*MW=X<7uMc@aB;+_M(GBv1>o=xLQyk ztoD|NMw%bU=>v3avu+sa)L^uN&El2WMkoCi!YGt=7^YMLQK?>1o{?fk6!cJ~%M5tS zk7lo|F2hhsg_6v?+lSakY#&uA1f9w4$oq>Xq;*Oq-y_s;iy1Kik4#Y;t||u9&oxQ( zo@{5P71zTNL|M*ForXJaO30e5QlchLoTJ%@3`c@7M(9N?}?Y zPb5S|I^X4cd-hT{#@Bah^6LeHCq#8jiWCp@=%>K<9X<70)It0Qrd(S*P(e-TTCbUO z!E1e#KE$%lDOl|sX7d$xV0%XNK}-Yi7zI1*OdmCycd7Yaat;0jGQu5WYqqZNqcGBa z6sc8Pw&KB@hk(gWIb0yh8tdcT5v@VMo<@!q;+Ok}Vc8GEmM`(F?i_a=f zk{d@tyvbojdx*DQu51-!8p2tK3_lEQ5-mh&6(jphsYltL0_<=@Qspz0&Q-;&du4EP zl*RXq+NRTzu&8E)98+^YdQtSF3s15YBDv*E0Ro8rCT04i7Dro9hsHxE&xqA2y(V=j zXtftxbsPyJzZz*(kS~?hDCtX&A{EvVNBAt-tbAH|gvSCq7L4oGKIy;~?Rg#gIaI-x zbrn&r9>lC?!X`Oshd-93To z)2zCLEL!Le~s{ zBZ;rsxMYbj9H3Wm%SzLRUrWAky$yzl3NK+NUWyKEdWLqmT=@$|mM9Gk6O=$h21V-P zRA~@%O*YV^+QtFH(Cj(Au>-?74+q@n9W^NN==)Y+etF`~_e(x3erYBvZp={_boYHd z^39^rM$+gVR;CI-ox@UZYzzx`FiL5CqI3k#g&V16(VZ(9*FShGJ!edUgF%&j@{404 zCJ!8uS(pPY;OizQjlG#m8 z2QX2N$$LifQ3~AXN$16<*qwGSVpz~X+}8EV7WA&;w4YFV?(57OBii*Fm$L_=Y7iRF zd*+>953Iqq<$~?xLIjbfMc}wrsr1x1e?}Gs$1}^he24x=(@|<%#9ld{{jyDtcQ6qr zKfCKx`2?h5p6d}()1*v0jhTK;E`lxN>?vIAK zRP!U|jP_P#{6oG)7hsb!*A(M1P!&Cgf)tJdhv|r_f^S_px63~NoB(R`5>}!+uejIF zJWTf&o`O4ig_~dkkyHZZ%=;)3viwtVzvG{v&R~B^RvpLjYKEZZruL>Z-v{mn zTAKiv!35*v#TVF1ck58YY?^gOi$%O$K1?rJc*?S1WPy!DsYrl312MCY%IJa>-0fo2 z8JmC3vHhjCp|p=*^{H@qNq7nyjeC%X;bLV!^oHhtMc$y8cwmgz;o;^Wl(bjIE$mRP zt=8B;j3Zj$tYfS6oJh4)caYWYYLK+a(aClvuXMBMelze<7#m=#GC z+uOp(m>Xd*geOE4!k`&CXsst*_{-~YZ2_+cgqG+-;yqDh74u`E9{TEcTY$ix;^LQ3 z>ksOpQ@KP{nYY|YmdiaFaRgiihhP#bufbDhy>$XX3#zrmT>KC)njK{vplI=w*S)eN zqUi0bBAm-1t4vJZo)~5Fv73G2X}t!JuY5g1poD)K#&g>>Oiquu2)YjdxoV}odpEU4 z71F0bx?x=7!E0s5#D5)Nq$G^e&HM&6H67)W+};OsBFj)|QnVr#A?1`lr-;A%9OcZk zb6ZzQJZ4kZPyRi0E_@mIm`$WuS|i;831;VM_Fwz)Hn4KaugeNp_1>y4{Tv3~uQ|tx zM%P&V?V?fECpHX%kGlZ{yJ;xF8EG;9&Q%A4AnE1^tvU-Hd7rP-bl#t#!!C`%O#709 zL18IY_lGPhX%8gW&p~IvMsp%ZZDLp&+fX!p7L+`cJT(!aWm8W%o}By>Zsf9^RS@Z7 zUXMG&pH=^W44{lnUhDAdjcXe?o+14fAquZ!2_G=#-^HhbC0KNQ#_@{iW2XG??7GT>RBCAdQ+W({?dwW(S zq{?9y58~GZ&er{!{Dw45#3F*W1jawl1K807SqM0}2&sIZm@yVJ^xZC0dmS6QcpDL+w|N#^rk$9mVi8H~EB}cQ z0w(1Qa<}6`qEDe}(3j1emF&QlT}e1x_IRqN^<(LqM4#)*o6FZsrFz>UNrC&Bm@#li zK>kJTInz42`Y{j{@wemitk)Cn9wDx*glgV3Ni8+m#z2P+6`-wOOc&Phst8h)$kRkM zy~Zoq94DNKxj#KJP#g_uY5{-^^Yb!PS~z5(3x)snmWzk)A3v582Loc}8bmPm!o^B<7h6cmws}X$J zb}Wx8J?fLDGp~AYH_jy6n+{Akl)M?W6iu(lyPfO-A%woiQgHj+*XXlH+6PX8`Dy9{ zjr;LZ%pPu?rLxA0(Dh|*PwPErlAq^bE=oKC?)i&a+Ei%U2R}WxJG?*WNitwIc{gt+l6wB8! 
[GIT binary patch omitted: the base85-encoded "literal" data is not human-readable and has been elided]

diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java
new file mode 100644
index 000000000000..430b46efaadc
--- /dev/null
+++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosAgent.java
@@ -0,0 +1,591 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.chaos;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.hadoop.hbase.util.RetryCounterFactory;
+import org.apache.hadoop.util.Shell;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.zookeeper.AsyncCallback;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/***
+ * An agent for executing destructive actions for ChaosMonkey.
+ * Uses ZooKeeper watchers and a LocalShell to kill services and to query
+ * service status on the targeted host, without requiring SSH.
+ * Uses the following ZNode structure:
+ *  /perfChaosTest              (root)
+ *      |
+ *      |
+ *   /chaosAgents               (used for registration; has hostname
+ *      |                        ephemeral nodes as children)
+ *      |
+ *   /chaosAgentTaskStatus      (used for task execution; has hostname
+ *      |                        persistent nodes as children, with tasks
+ *      |                        as their children)
+ *      |
+ *   /hostname
+ *      |
+ *      |
+ *   /task0000001               (command as data)
+ *     (there are two types of command:
+ *      1: starts with "exec",
+ *         for executing a destructive action;
+ *      2: starts with "bool",
+ *         for getting only the status of a service.)
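+ *
+ * A typical round trip, as a sketch (command and names illustrative): the agent
+ * registers an ephemeral znode for its hostname under the registration parent, a
+ * client writes e.g. "exec sudo kill -9 <pid>" as the data of a new task znode under
+ * the agent's status parent, the agent runs the command through its LocalShell and
+ * then overwrites the task data with "done" (or "error") for the client to read back.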
+ *
+ */
+@InterfaceAudience.Private
+public class ChaosAgent implements Watcher, Closeable, Runnable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ChaosAgent.class);
+  static AtomicBoolean stopChaosAgent = new AtomicBoolean();
+  private ZooKeeper zk;
+  private String quorum;
+  private String agentName;
+  private Configuration conf;
+  private RetryCounterFactory retryCounterFactory;
+  private volatile boolean connected = false;
+
+  public ChaosAgent(Configuration conf, String quorum, String agentName) {
+    initChaosAgent(conf, quorum, agentName);
+  }
+
+  /***
+   * Sets the global params, initiates the ZooKeeper connection and then registers the agent.
+   * @param conf initial configuration to use
+   * @param quorum ZK quorum
+   * @param agentName agent name to use
+   */
+  private void initChaosAgent(Configuration conf, String quorum, String agentName) {
+    this.conf = conf;
+    this.quorum = quorum;
+    this.agentName = agentName;
+    this.retryCounterFactory = new RetryCounterFactory(new RetryCounter.RetryConfig()
+      .setMaxAttempts(conf.getInt(ChaosConstants.RETRY_ATTEMPTS_KEY,
+        ChaosConstants.DEFAULT_RETRY_ATTEMPTS)).setSleepInterval(
+          conf.getLong(ChaosConstants.RETRY_SLEEP_INTERVAL_KEY,
+            ChaosConstants.DEFAULT_RETRY_SLEEP_INTERVAL)));
+    try {
+      this.createZKConnection(null);
+      this.register();
+    } catch (IOException e) {
+      LOG.error("Error creating ZooKeeper connection", e);
+    }
+  }
+
+  /***
+   * Creates the connection with ZooKeeper.
+   * @throws IOException if something goes wrong
+   */
+  private void createZKConnection(Watcher watcher) throws IOException {
+    if (watcher == null) {
+      zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, this);
+    } else {
+      zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, watcher);
+    }
+    LOG.info("ZooKeeper Connection created for ChaosAgent: " + agentName);
+  }
+
+  // WATCHERS: below are the watchers used by the ChaosAgent
+
+  /***
+   * Watcher that notifies the agent when a task is assigned to it, i.e. when a
+   * child node is added under this agent's znode.
+   */
+  Watcher newTaskCreatedWatcher = new Watcher() {
+    @Override
+    public void process(WatchedEvent watchedEvent) {
+      if (watchedEvent.getType() == Event.EventType.NodeChildrenChanged) {
+        if (!(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE
+          + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName).equals(watchedEvent.getPath())) {
+          throw new RuntimeException(KeeperException.create(
+            KeeperException.Code.DATAINCONSISTENCY));
+        }
+
+        LOG.info("Change in Tasks Node, checking for Tasks again.");
+        getTasks();
+      }
+
+    }
+  };
+
+  // CALLBACKS: below are the callbacks used by the ChaosAgent
+
+  /**
+   * Callback used while setting the status of a given task; logs the outcome.
+   */
+  AsyncCallback.StatCallback setStatusOfTaskZNodeCallback = (rc, path, ctx, stat) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        // Connection to the server was lost while setting status; setting it again.
+        try {
+          recreateZKConnection();
+        } catch (Exception e) {
+          break;
+        }
+        setStatusOfTaskZNode(path, (String) ctx);
+        break;
+
+      case OK:
+        LOG.info("Status of Task has been set");
+        break;
+
+      case NONODE:
+        LOG.error("Chaos Agent status node does not exist: "
+          + "check the ZNode directory structure again.");
+        break;
+
+      default:
+        LOG.error("Error while setting status of task ZNode: "
+          + path, KeeperException.create(KeeperException.Code.get(rc), path));
+    }
+  };
+
+  /**
+   * Callback used while creating a persistent ZNode; tries to create the
+   * ZNode again if the connection was lost on the previous try.
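+   * (This is the standard ZooKeeper async retry shape: the create arguments travel
+   * in the ctx parameter, so on CONNECTIONLOSS the operation can be re-issued verbatim.)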
+   */
+  AsyncCallback.StringCallback createZNodeCallback = (rc, path, ctx, name) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        try {
+          recreateZKConnection();
+        } catch (Exception e) {
+          break;
+        }
+        createZNode(path, (byte[]) ctx);
+        break;
+      case OK:
+        LOG.info("ZNode created : " + path);
+        break;
+      case NODEEXISTS:
+        LOG.warn("ZNode already registered: " + path);
+        break;
+      default:
+        LOG.error("Error occurred while creating Persistent ZNode: " + path,
+          KeeperException.create(KeeperException.Code.get(rc), path));
+    }
+  };
+
+  /**
+   * Callback used while creating an ephemeral ZNode; tries to create the ZNode again
+   * if the connection was lost on the previous try.
+   */
+  AsyncCallback.StringCallback createEphemeralZNodeCallback = (rc, path, ctx, name) -> {
+    switch (KeeperException.Code.get(rc)) {
+      case CONNECTIONLOSS:
+        try {
+          recreateZKConnection();
+        } catch (Exception e) {
+          break;
+        }
+        createEphemeralZNode(path, (byte[]) ctx);
+        break;
+      case OK:
+        LOG.info("ZNode created : " + path);
+        break;
+      case NODEEXISTS:
+        LOG.warn("ZNode already registered: " + path);
+        break;
+      default:
+        LOG.error("Error occurred while creating Ephemeral ZNode: ",
+          KeeperException.create(KeeperException.Code.get(rc), path));
+    }
+  };
+
+  /**
+   * Callback used by getTasksForAgentCallback while getting a command; after getting
+   * the command successfully it executes the command and sets the task status
+   * according to the command type.
+   */
+  AsyncCallback.DataCallback getTaskForExecutionCallback = new AsyncCallback.DataCallback() {
+    @Override
+    public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
+      switch (KeeperException.Code.get(rc)) {
+        case CONNECTIONLOSS:
+          // Connection to the server has been lost while getting the task, getting data again.
+          try {
+            recreateZKConnection();
+          } catch (Exception e) {
+            break;
+          }
+          zk.getData(path,
+            false,
+            getTaskForExecutionCallback,
+            new String(data));
+          break;
+        case OK:
+          String cmd = new String(data);
+          LOG.info("Executing command : " + cmd);
+          String status = ChaosConstants.TASK_COMPLETION_STRING;
+          try {
+            String user = conf.get(ChaosConstants.CHAOSAGENT_SHELL_USER,
+              ChaosConstants.DEFAULT_SHELL_USER);
+            switch (cmd.substring(0, 4)) {
+              case "bool":
+                String ret = execWithRetries(user, cmd.substring(4)).getSecond();
+                status = Boolean.toString(ret.length() > 0);
+                break;
+
+              case "exec":
+                execWithRetries(user, cmd.substring(4));
+                break;
+
+              default:
+                LOG.error("Unknown Command Type");
+                status = ChaosConstants.TASK_ERROR_STRING;
+            }
+          } catch (IOException e) {
+            LOG.error("Got error while executing command : " + cmd
+              + " On agent : " + agentName + " Error : " + e);
+            status = ChaosConstants.TASK_ERROR_STRING;
+          }
+
+          try {
+            setStatusOfTaskZNode(path, status);
+            Thread.sleep(ChaosConstants.SET_STATUS_SLEEP_TIME);
+          } catch (InterruptedException e) {
+            LOG.error("Error occurred after setting status: " + e);
+          }
+          // Without this break the OK case would fall through into the default
+          // branch and log a spurious error for every successful execution.
+          break;
+
+        default:
+          LOG.error("Error occurred while getting data",
+            KeeperException.create(KeeperException.Code.get(rc), path));
+      }
+    }
+  };
+
+  /***
+   * Callback used while getting the tasks for this agent. If the call completes
+   * without an exception, it creates a separate thread for each child znode so
+   * the tasks can be executed in parallel.
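+   * Each child znode name is handed to its worker thread, which fetches the
+   * command bytes for that task via getData.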
+   */
+  AsyncCallback.ChildrenCallback getTasksForAgentCallback = new AsyncCallback.ChildrenCallback() {
+    @Override
+    public void processResult(int rc, String path, Object ctx, List<String> children) {
+      switch (KeeperException.Code.get(rc)) {
+        case CONNECTIONLOSS: {
+          // Connection to the server has been lost, getting tasks again.
+          try {
+            recreateZKConnection();
+          } catch (Exception e) {
+            break;
+          }
+          getTasks();
+          break;
+        }
+
+        case OK: {
+          if (children != null) {
+            try {
+
+              LOG.info("Executing each task as a separate thread");
+              List<Thread> tasksList = new ArrayList<>();
+              for (String task : children) {
+                String threadName = agentName + "_" + task;
+                Thread t = new Thread(() -> {
+
+                  LOG.info("Executing task : " + task + " of agent : " + agentName);
+                  zk.getData(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE
+                    + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName
+                    + ChaosConstants.ZNODE_PATH_SEPARATOR + task,
+                    false,
+                    getTaskForExecutionCallback,
+                    task);
+
+                });
+                t.setName(threadName);
+                t.start();
+                tasksList.add(t);
+              }
+
+              // Join only after every worker has been started; joining inside the
+              // loop above would serialize the tasks instead of running them in parallel.
+              for (Thread thread : tasksList) {
+                thread.join();
+              }
+            } catch (InterruptedException e) {
+              LOG.error("Error scheduling next task : "
+                + " for agent : " + agentName + " Error : " + e);
+            }
+          }
+          break;
+        }
+
+        default:
+          LOG.error("Error occurred while getting task",
+            KeeperException.create(KeeperException.Code.get(rc), path));
+      }
+    }
+  };
+
+  /***
+   * Creates a PERSISTENT ZNode with the given path and data.
+   * @param path Path at which to create the ZNode
+   * @param data Data to put under the ZNode
+   */
+  public void createZNode(String path, byte[] data) {
+    zk.create(path,
+      data,
+      ZooDefs.Ids.OPEN_ACL_UNSAFE,
+      CreateMode.PERSISTENT,
+      createZNodeCallback,
+      data);
+  }
+
+  /***
+   * Creates an EPHEMERAL ZNode with the given path and data.
+   * @param path Path at which to create the ephemeral ZNode
+   * @param data Data to put under the ZNode
+   */
+  public void createEphemeralZNode(String path, byte[] data) {
+    zk.create(path,
+      data,
+      ZooDefs.Ids.OPEN_ACL_UNSAFE,
+      CreateMode.EPHEMERAL,
+      createEphemeralZNodeCallback,
+      data);
+  }
+
+  /**
+   * Checks if the given ZNode exists; if not, creates a PERSISTENT ZNode for it.
+   *
+   * @param path Path to check for the ZNode
+   */
+  private void createIfZNodeNotExists(String path) {
+    try {
+      if (zk.exists(path,
+        false) == null) {
+        createZNode(path, new byte[0]);
+      }
+    } catch (KeeperException | InterruptedException e) {
+      LOG.error("Error checking given node : " + path + " " + e);
+    }
+  }
+
+  /**
+   * Sets the given status on the task ZNode.
+   *
+   * @param taskZNode ZNode on which to set the status
+   * @param status Status value
+   */
+  public void setStatusOfTaskZNode(String taskZNode, String status) {
+    LOG.info("Setting status of Task ZNode: " + taskZNode + " status : " + status);
+    zk.setData(taskZNode,
+      status.getBytes(),
+      -1,
+      setStatusOfTaskZNodeCallback,
+      null);
+  }
+
+  /**
+   * Registration of the ChaosAgent: checks for and creates the necessary ZNodes.
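+   * Registration is idempotent: the persistent parent znodes are created only when
+   * missing, while the per-agent ephemeral node disappears with the session and is
+   * recreated on reconnect (see recreateZKConnection()).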
+   */
+  private void register() {
+    createIfZNodeNotExists(ChaosConstants.CHAOS_TEST_ROOT_ZNODE);
+    createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE);
+    createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE);
+    createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE
+      + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName);
+
+    createEphemeralZNode(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE
+      + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName, new byte[0]);
+  }
+
+  /***
+   * Gets tasks for execution: sets a watch on this agent's host znode and waits
+   * for tasks to be assigned; getTasksForAgentCallback handles the execution.
+   */
+  private void getTasks() {
+    LOG.info("Getting Tasks for Agent: " + agentName + " and setting watch for new Tasks");
+    zk.getChildren(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE
+      + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName,
+      newTaskCreatedWatcher,
+      getTasksForAgentCallback,
+      null);
+  }
+
+  /**
+   * Executes a command with retries, as the given user, using a LocalShell.
+   *
+   * @param user user name, default none
+   * @param cmd Command to execute
+   * @return A pair of exit code and shell output
+   * @throws IOException Exception while executing the shell command
+   */
+  private Pair<Integer, String> execWithRetries(String user, String cmd) throws IOException {
+    RetryCounter retryCounter = retryCounterFactory.create();
+    while (true) {
+      try {
+        return exec(user, cmd);
+      } catch (IOException e) {
+        retryOrThrow(retryCounter, e, user, cmd);
+      }
+      try {
+        retryCounter.sleepUntilNextRetry();
+      } catch (InterruptedException e) {
+        LOG.warn("Sleep Interrupted: " + e);
+      }
+    }
+  }
+
+  private Pair<Integer, String> exec(String user, String cmd) throws IOException {
+    LOG.info("Executing Shell command: " + cmd + ", user: " + user);
+
+    LocalShell shell = new LocalShell(user, cmd);
+    try {
+      shell.execute();
+    } catch (Shell.ExitCodeException e) {
+      String output = shell.getOutput();
+      throw new Shell.ExitCodeException(e.getExitCode(), "stderr: " + e.getMessage()
+        + ", stdout: " + output);
+    }
+    LOG.info("Executed Shell command, exit code: {}, output:\n{}", shell.getExitCode(),
+      shell.getOutput());
+
+    return new Pair<>(shell.getExitCode(), shell.getOutput());
+  }
+
+  private <E extends Exception> void retryOrThrow(RetryCounter retryCounter, E ex,
+    String user, String cmd) throws E {
+    if (retryCounter.shouldRetry()) {
+      LOG.warn("Local command: {}, user: {}, failed at attempt {}. Retrying until maxAttempts: {}."
+        + " Exception: {}", cmd, user, retryCounter.getAttemptTimes(),
+        retryCounter.getMaxAttempts(), ex.getMessage());
+      return;
+    }
+    throw ex;
+  }
+
+  private boolean isConnected() {
+    return connected;
+  }
+
+  @Override
+  public void close() throws IOException {
+    LOG.info("Closing ZooKeeper Connection for Chaos Agent : " + agentName);
+    try {
+      zk.close();
+    } catch (InterruptedException e) {
+      LOG.error("Error while closing ZooKeeper Connection.", e);
+    }
+  }
+
+  @Override
+  public void run() {
+    try {
+      LOG.info("Running Chaos Agent on : " + agentName);
+      while (!this.isConnected()) {
+        Thread.sleep(100);
+      }
+      this.getTasks();
+      while (!stopChaosAgent.get()) {
+        Thread.sleep(500);
+      }
+    } catch (InterruptedException e) {
+      LOG.error("Error while running Chaos Agent", e);
+    }
+
+  }
+
+  @Override
+  public void process(WatchedEvent watchedEvent) {
+    LOG.info("Processing event: " + watchedEvent.toString());
+    if (watchedEvent.getType() == Event.EventType.None) {
+      switch (watchedEvent.getState()) {
+        case SyncConnected:
+          connected = true;
+          break;
+        case Disconnected:
+          connected = false;
+          break;
+        case Expired:
+          connected = false;
+          LOG.error("Session expired, creating a new connection");
+          try {
+            createZKConnection(null);
+          } catch (IOException e) {
+            LOG.error("Error creating ZooKeeper connection", e);
+          }
+          // Without this break an expired session would also log "Unknown State" below.
+          break;
+        default:
+          LOG.error("Unknown State");
+          break;
+      }
+    }
+  }
+
+  private void recreateZKConnection() throws Exception {
+    try {
+      zk.close();
+      createZKConnection(newTaskCreatedWatcher);
+      createEphemeralZNode(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE
+        + ChaosConstants.ZNODE_PATH_SEPARATOR + agentName, new byte[0]);
+    } catch (IOException e) {
+      LOG.error("Error creating new ZK connection for agent: {}", agentName, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Executes a command locally.
+   */
+  protected static class LocalShell extends Shell.ShellCommandExecutor {
+
+    private String user;
+    private String execCommand;
+
+    public LocalShell(String user, String execCommand) {
+      super(new String[]{execCommand});
+      this.user = user;
+      this.execCommand = execCommand;
+    }
+
+    @Override
+    public String[] getExecString() {
+      // TODO: Considering the agent is running as the same user.
+      if (!user.equals(ChaosConstants.DEFAULT_SHELL_USER)) {
+        // "su -u" is not a valid su invocation; switch users via su's -c option
+        // instead (this assumes the agent user may su without a password).
+        execCommand = String.format("su %1$s -c '%2$s'", user, execCommand);
+      }
+      return new String[]{"/usr/bin/env", "bash", "-c", execCommand};
+    }
+
+    @Override
+    public void execute() throws IOException {
+      super.execute();
+    }
+  }
+}
diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java
new file mode 100644
index 000000000000..54fbe9b10cde
--- /dev/null
+++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosConstants.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.chaos;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/***
+ * ChaosConstants holds a bunch of Chaos-related constants.
+ */
+@InterfaceAudience.Public
+public final class ChaosConstants {
+
+  /*Base ZNode for the whole Chaos Testing*/
+  public static final String CHAOS_TEST_ROOT_ZNODE = "/hbase";
+
+  /*Just a / used as the path separator*/
+  public static final String ZNODE_PATH_SEPARATOR = "/";
+
+  /*ZNode used for ChaosAgents registration.*/
+  public static final String CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE =
+    CHAOS_TEST_ROOT_ZNODE + ZNODE_PATH_SEPARATOR + "chaosAgents";
+
+  /*ZNode used for getting the status of assigned tasks*/
+  public static final String CHAOS_AGENT_STATUS_PERSISTENT_ZNODE =
+    CHAOS_TEST_ROOT_ZNODE + ZNODE_PATH_SEPARATOR + "chaosAgentTaskStatus";
+
+  /*Config property for the number of retries when executing a command*/
+  public static final String RETRY_ATTEMPTS_KEY = "hbase.it.clustermanager.retry.attempts";
+
+  /*Default value for the number of retries*/
+  public static final int DEFAULT_RETRY_ATTEMPTS = 5;
+
+  /*Config property for the sleep between retries*/
+  public static final String RETRY_SLEEP_INTERVAL_KEY =
+    "hbase.it.clustermanager.retry.sleep.interval";
+
+  /*Default sleep time between each retry*/
+  public static final int DEFAULT_RETRY_SLEEP_INTERVAL = 5000;
+
+  /*Config property for executing a command as a specific user*/
+  public static final String CHAOSAGENT_SHELL_USER = "hbase.it.clustermanager.ssh.user";
+
+  /*Default user for executing local commands*/
+  public static final String DEFAULT_SHELL_USER = "";
+
+  /*Timeout used while creating the ZooKeeper connection*/
+  public static final int SESSION_TIMEOUT_ZK = 60000 * 10;
+
+  /*Time given to the ChaosAgent to set the task status*/
+  public static final int SET_STATUS_SLEEP_TIME = 30 * 1000;
+
+  /*Status String when an ERROR occurred while executing the task*/
+  public static final String TASK_ERROR_STRING = "error";
+
+  /*Status String when the command executed correctly*/
+  public static final String TASK_COMPLETION_STRING = "done";
+
+  /*Name of the ChoreService to use*/
+  public static final String CHORE_SERVICE_PREFIX = "ChaosService";
+
+}
diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java
new file mode 100644
index 000000000000..e2abe3d42655
--- /dev/null
+++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosService.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ */ + +package org.apache.hadoop.hbase.chaos; + +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.Collection; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.AuthUtil; +import org.apache.hadoop.hbase.ChoreService; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; + +/** + * Class used to start/stop Chaos related services (currently chaosagent) + */ +@InterfaceAudience.Private +public class ChaosService { + + private static final Logger LOG = LoggerFactory.getLogger(ChaosService.class.getName()); + + public static void execute(String[] args, Configuration conf) { + LOG.info("arguments : " + Arrays.toString(args)); + + try { + CommandLine cmdline = new GnuParser().parse(getOptions(), args); + if (cmdline.hasOption(ChaosServiceName.CHAOSAGENT.toString().toLowerCase())) { + String actionStr = cmdline.getOptionValue(ChaosServiceName.CHAOSAGENT.toString().toLowerCase()); + try { + ExecutorAction action = ExecutorAction.valueOf(actionStr.toUpperCase()); + if (action == ExecutorAction.START) { + ChaosServiceStart(conf, ChaosServiceName.CHAOSAGENT); + } else if (action == ExecutorAction.STOP) { + ChaosServiceStop(); + } + } catch (IllegalArgumentException e) { + LOG.error("action passed: {} Unexpected action. 
Please provide only start/stop.", + actionStr, e); + throw new RuntimeException(e); + } + } else { + LOG.error("Invalid Options"); + } + } catch (Exception e) { + LOG.error("Error while starting ChaosService : ", e); + } + } + + private static void ChaosServiceStart(Configuration conf, ChaosServiceName serviceName) { + switch (serviceName) { + case CHAOSAGENT: + ChaosAgent.stopChaosAgent.set(false); + try { + Thread t = new Thread(new ChaosAgent(conf, + ChaosUtils.getZKQuorum(conf), ChaosUtils.getHostName())); + t.start(); + t.join(); + } catch (InterruptedException | UnknownHostException e) { + LOG.error("Failed while executing next task execution of ChaosAgent on : {}", + serviceName, e); + } + break; + default: + LOG.error("Service Name not known : " + serviceName.toString()); + } + } + + private static void ChaosServiceStop() { + ChaosAgent.stopChaosAgent.set(true); + } + + private static Options getOptions() { + Options options = new Options(); + options.addOption(new Option("c", ChaosServiceName.CHAOSAGENT.toString().toLowerCase(), + true, "expecting a start/stop argument")); + options.addOption(new Option("D", ChaosServiceName.GENERIC.toString(), + true, "generic D param")); + LOG.info(Arrays.toString(new Collection[] { options.getOptions() })); + return options; + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + new GenericOptionsParser(conf, args); + + ChoreService choreChaosService = null; + ScheduledChore authChore = AuthUtil.getAuthChore(conf); + + try { + if (authChore != null) { + choreChaosService = new ChoreService(ChaosConstants.CHORE_SERVICE_PREFIX); + choreChaosService.scheduleChore(authChore); + } + + execute(args, conf); + } finally { + if (authChore != null) + choreChaosService.shutdown(); + } + } + + enum ChaosServiceName { + CHAOSAGENT, + GENERIC + } + + + enum ExecutorAction { + START, + STOP + } +} diff --git a/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java new file mode 100644 index 000000000000..da42021bcafb --- /dev/null +++ b/hbase-it/src/main/java/org/apache/hadoop/hbase/chaos/ChaosUtils.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.chaos; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * ChaosUtils holds a bunch of useful functions like getting hostname and getting ZooKeeper quorum. 
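+ * getZKQuorum, for example, joins every host from hbase.zookeeper.quorum with the
+ * hbase.zookeeper.property.clientPort value, yielding "host1:2181,host2:2181,...".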
+ */
+@InterfaceAudience.Private
+public class ChaosUtils {
+
+  public static String getHostName() throws UnknownHostException {
+    return InetAddress.getLocalHost().getHostName();
+  }
+
+  public static String getZKQuorum(Configuration conf) {
+    String port =
+      Integer.toString(conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181));
+    String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, "localhost");
+    for (int i = 0; i < serverHosts.length; i++) {
+      serverHosts[i] = serverHosts[i] + ":" + port;
+    }
+    return String.join(",", serverHosts);
+  }
+
+}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java
new file mode 100644
index 000000000000..31fb9e3ca604
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java
@@ -0,0 +1,332 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.zookeeper.AsyncCallback;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class ChaosZKClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ChaosZKClient.class.getName());
+  private static final String CHAOS_AGENT_PARENT_ZNODE = "/hbase/chaosAgents";
+  private static final String CHAOS_AGENT_STATUS_ZNODE = "/hbase/chaosAgentTaskStatus";
+  private static final String ZNODE_PATH_SEPARATOR = "/";
+  private static final String TASK_PREFIX = "task_";
+  private static final String TASK_ERROR_STRING = "error";
+  private static final String TASK_COMPLETION_STRING = "done";
+  private static final String TASK_BOOLEAN_TRUE = "true";
+  private static final String TASK_BOOLEAN_FALSE = "false";
+  private static final String CONNECTION_LOSS = "ConnectionLoss";
+  private static final int SESSION_TIMEOUT_ZK = 10 * 60 * 1000;
+  private static final int TASK_EXECUTION_TIMEOUT = 5 * 60 * 1000;
+  private volatile String taskStatus = null;
+
+  private final String quorum;
+  private ZooKeeper zk;
+
+  public ChaosZKClient(String quorum) {
+    this.quorum = quorum;
+    try {
+      this.createNewZKConnection();
+    } catch (IOException e) {
+      LOG.error("Error creating ZooKeeper Connection: ", e);
+    }
+  }
+
+  /**
+   * Creates the connection with ZooKeeper.
+   * @throws IOException when not able to create the connection properly
+   */
+  public void createNewZKConnection() throws IOException {
+    Watcher watcher = new Watcher() {
+      @Override
+      public void process(WatchedEvent watchedEvent) {
+        LOG.info("Created ZooKeeper Connection For executing task");
+      }
+    };
+
+    this.zk = new ZooKeeper(quorum, SESSION_TIMEOUT_ZK, watcher);
+  }
+
+  /**
+   * Checks if a ChaosAgent is running on the target host by checking its ZNode.
+   * @param hostname hostname to check for a chaosagent
+   * @return true/false whether the agent is running or not
+   */
+  private boolean isChaosAgentRunning(String hostname) {
+    try {
+      return zk.exists(CHAOS_AGENT_PARENT_ZNODE + ZNODE_PATH_SEPARATOR + hostname,
+        false) != null;
+    } catch (KeeperException e) {
+      if (e.toString().contains(CONNECTION_LOSS)) {
+        recreateZKConnection();
+        try {
+          return zk.exists(CHAOS_AGENT_PARENT_ZNODE + ZNODE_PATH_SEPARATOR + hostname,
+            false) != null;
+        } catch (KeeperException | InterruptedException ie) {
+          LOG.error("ERROR ", ie);
+        }
+      }
+    } catch (InterruptedException e) {
+      LOG.error("Error checking for given hostname: {} ERROR: ", hostname, e);
+    }
+    return false;
+  }
+
+  /**
+   * Creates a task for the target host by creating a ZNode, then waits a bounded
+   * amount of time for the task to complete.
+   * @param taskObject wrapper around the command and the target hostname
+   * @return the resulting task status
+   */
+  public String submitTask(final TaskObject taskObject) {
+    if (isChaosAgentRunning(taskObject.getTaskHostname())) {
+      LOG.info("Creating task node");
+      zk.create(CHAOS_AGENT_STATUS_ZNODE + ZNODE_PATH_SEPARATOR
+        + taskObject.getTaskHostname() + ZNODE_PATH_SEPARATOR + TASK_PREFIX,
+        taskObject.getCommand().getBytes(),
+        ZooDefs.Ids.OPEN_ACL_UNSAFE,
+        CreateMode.EPHEMERAL_SEQUENTIAL,
+        submitTaskCallback,
+        taskObject);
+      long start = System.currentTimeMillis();
+
+      while ((System.currentTimeMillis() - start) < TASK_EXECUTION_TIMEOUT) {
+        if (taskStatus != null) {
+          return taskStatus;
+        }
+        Threads.sleep(500);
+      }
+    } else {
+      LOG.warn("ChaosAgent is not running on target host: " + taskObject.getTaskHostname());
+    }
+    return TASK_ERROR_STRING;
+  }
+
+  /**
+   * Gets the status of a submitted task.
+   * @param path path at which to get the status
+   * @param ctx path context
+   */
+  private void getStatus(String path, Object ctx) {
+    LOG.info("Getting Status of task: " + path);
+    zk.getData(path,
+      false,
+      getStatusCallback,
+      ctx);
+  }
+
+  /**
+   * Sets a watch on a submitted task.
+   * @param name ZNode name to set a watch on
+   * @param taskObject context for the ZNode name
+   */
+  private void setStatusWatch(String name, TaskObject taskObject) {
+    LOG.info("Checking for ZNode and Setting watch for task : " + name);
+    zk.exists(name,
+      setStatusWatcher,
+      setStatusWatchCallback,
+      taskObject);
+  }
+
+  /**
+   * Deletes a task once its status has been read.
+   * @param path path of the ZNode to delete
+   */
+  private void deleteTask(String path) {
+    LOG.info("Deleting task: " + path);
+    zk.delete(path,
+      -1,
+      taskDeleteCallback,
+      null);
+  }
+
+  //WATCHERS:
+
+  /**
+   * Watcher to get a notification whenever the status of a task changes.
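+   * Note that ZooKeeper watches are one-shot: this watcher fires once per change,
+   * and a new watch has to be armed through another exists()/getData() call if
+   * further updates are expected.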
+ */ + Watcher setStatusWatcher = new Watcher() { + @Override + public void process(WatchedEvent watchedEvent) { + LOG.info("Setting status watch for task: " + watchedEvent.getPath()); + if(watchedEvent.getType() == Event.EventType.NodeDataChanged) { + if(!watchedEvent.getPath().contains(TASK_PREFIX)) { + throw new RuntimeException(KeeperException.create( + KeeperException.Code.DATAINCONSISTENCY)); + } + getStatus(watchedEvent.getPath(), (Object) watchedEvent.getPath()); + + } + } + }; + + //CALLBACKS + + AsyncCallback.DataCallback getStatusCallback = (rc, path, ctx, data, stat) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + //Connectionloss while getting status of task, getting again + recreateZKConnection(); + getStatus(path, ctx); + break; + + case OK: + if (ctx!=null) { + + String status = new String(data); + taskStatus = status; + switch (status) { + case TASK_COMPLETION_STRING: + case TASK_BOOLEAN_TRUE: + case TASK_BOOLEAN_FALSE: + LOG.info("Task executed completely : Status --> " + status); + break; + + case TASK_ERROR_STRING: + LOG.info("There was error while executing task : Status --> " + status); + break; + + default: + LOG.warn("Status of task is undefined!! : Status --> " + status); + } + + deleteTask(path); + } + break; + + default: + LOG.error("ERROR while getting status of task: " + path + " ERROR: " + + KeeperException.create(KeeperException.Code.get(rc))); + } + }; + + AsyncCallback.StatCallback setStatusWatchCallback = (rc, path, ctx, stat) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + //ConnectionLoss while setting watch on status ZNode, setting again. + recreateZKConnection(); + setStatusWatch(path, (TaskObject) ctx); + break; + + case OK: + if(stat != null) { + getStatus(path, null); + } + break; + + default: + LOG.error("ERROR while setting watch on task ZNode: " + path + " ERROR: " + + KeeperException.create(KeeperException.Code.get(rc))); + } + }; + + AsyncCallback.StringCallback submitTaskCallback = (rc, path, ctx, name) -> { + switch (KeeperException.Code.get(rc)) { + case CONNECTIONLOSS: + // Connection to server was lost while submitting task, submitting again. 
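+        // Note: the create above uses EPHEMERAL_SEQUENTIAL, so if the first attempt
+        // actually reached the server before the connection dropped, resubmitting may
+        // leave a duplicate task znode behind for the agent to execute.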
+        recreateZKConnection();
+        submitTask((TaskObject) ctx);
+        break;
+
+      case OK:
+        LOG.info("Task created : " + name);
+        setStatusWatch(name, (TaskObject) ctx);
+        break;
+
+      default:
+        LOG.error("Error submitting task: " + name + " ERROR:"
+          + KeeperException.create(KeeperException.Code.get(rc)));
+    }
+  };
+
+  AsyncCallback.VoidCallback taskDeleteCallback = new AsyncCallback.VoidCallback() {
+    @Override
+    public void processResult(int rc, String path, Object ctx) {
+      switch (KeeperException.Code.get(rc)) {
+        case CONNECTIONLOSS:
+          // Connection loss while deleting the task, deleting again.
+          recreateZKConnection();
+          deleteTask(path);
+          break;
+
+        case OK:
+          LOG.info("Task Deleted successfully!");
+          LOG.info("Closing ZooKeeper Connection");
+          try {
+            zk.close();
+          } catch (InterruptedException e) {
+            LOG.error("Error while closing ZooKeeper Connection.");
+          }
+          break;
+
+        default:
+          LOG.error("ERROR while deleting task: " + path + " ERROR: "
+            + KeeperException.create(KeeperException.Code.get(rc)));
+      }
+    }
+  };
+
+
+  private void recreateZKConnection() {
+    try {
+      zk.close();
+    } catch (InterruptedException e) {
+      LOG.error("Error closing ZK connection : ", e);
+    } finally {
+      try {
+        createNewZKConnection();
+      } catch (IOException e) {
+        LOG.error("Error creating new ZK connection for agent: ", e);
+      }
+    }
+  }
+
+  static class TaskObject {
+    private final String command;
+    private final String taskHostname;
+
+    public TaskObject(String command, String taskHostname) {
+      this.command = command;
+      this.taskHostname = taskHostname;
+    }
+
+    public String getCommand() {
+      return this.command;
+    }
+
+    public String getTaskHostname() {
+      return taskHostname;
+    }
+  }
+
+}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java
new file mode 100644
index 000000000000..88f14b0d0d34
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ZNodeClusterManager.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class ZNodeClusterManager extends Configured implements ClusterManager {
+  private static final Logger LOG = LoggerFactory.getLogger(ZNodeClusterManager.class.getName());
+  private static final String SIGKILL = "SIGKILL";
+  private static final String SIGSTOP = "SIGSTOP";
+  private static final String SIGCONT = "SIGCONT";
+
+  public ZNodeClusterManager() {
+  }
+
+  private String getZKQuorumServersStringFromHbaseConfig() {
+    String port =
+      Integer.toString(getConf().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181));
+    String[] serverHosts = getConf().getStrings(HConstants.ZOOKEEPER_QUORUM, "localhost");
+    for (int i = 0; i < serverHosts.length; i++) {
+      serverHosts[i] = serverHosts[i] + ":" + port;
+    }
+    return Arrays.asList(serverHosts).stream().collect(Collectors.joining(","));
+  }
+
+  private String createZNode(String hostname, String cmd) throws IOException {
+    LOG.info("ZooKeeper mode enabled, sending command to ZooKeeper. cmd: {}, hostname: {}",
+      cmd, hostname);
+    ChaosZKClient chaosZKClient = new ChaosZKClient(getZKQuorumServersStringFromHbaseConfig());
+    return chaosZKClient.submitTask(new ChaosZKClient.TaskObject(cmd, hostname));
+  }
+
+  protected HBaseClusterManager.CommandProvider getCommandProvider(ServiceType service)
+    throws IOException {
+    switch (service) {
+      case HADOOP_DATANODE:
+      case HADOOP_NAMENODE:
+        return new HBaseClusterManager.HadoopShellCommandProvider(getConf());
+      case ZOOKEEPER_SERVER:
+        return new HBaseClusterManager.ZookeeperShellCommandProvider(getConf());
+      default:
+        return new HBaseClusterManager.HBaseShellCommandProvider(getConf());
+    }
+  }
+
+  public void signal(ServiceType service, String signal, String hostname) throws IOException {
+    createZNode(hostname, CmdType.exec.toString()
+      + getCommandProvider(service).signalCommand(service, signal));
+  }
+
+  private void createOpCommand(String hostname, ServiceType service,
+    HBaseClusterManager.CommandProvider.Operation op) throws IOException {
+    createZNode(hostname, CmdType.exec.toString()
+      + getCommandProvider(service).getCommand(service, op));
+  }
+
+  @Override
+  public void start(ServiceType service, String hostname, int port) throws IOException {
+    createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.START);
+  }
+
+  @Override
+  public void stop(ServiceType service, String hostname, int port) throws IOException {
+    createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.STOP);
+  }
+
+  @Override
+  public void restart(ServiceType service, String hostname, int port) throws IOException {
+    createOpCommand(hostname, service, HBaseClusterManager.CommandProvider.Operation.RESTART);
+  }
+
+  @Override
+  public void kill(ServiceType service, String hostname, int port) throws IOException {
+    signal(service, SIGKILL, hostname);
+  }
+
+  @Override
+  public void suspend(ServiceType service, String hostname, int port) throws IOException {
+    signal(service, SIGSTOP, hostname);
+  }
+
+  @Override
+  public void resume(ServiceType service, String hostname, int port) throws IOException {
+    signal(service, SIGCONT, hostname);
+  }
+
+  @Override
+  public boolean isRunning(ServiceType service, String hostname, int port) throws IOException {
Boolean.parseBoolean(createZNode(hostname, CmdType.bool.toString() + + getCommandProvider(service).isRunningCommand(service))); + } + + enum CmdType { + exec, + bool + } +} From ca4a1a4b55719f6c4dd64d691b1cb9e6bc8cf6f1 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Mon, 21 Dec 2020 08:08:22 -0800 Subject: [PATCH 600/769] HBASE-25425 Some notes on RawCell (#2797) Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/client/Mutation.java | 10 ---------- .../src/main/java/org/apache/hadoop/hbase/RawCell.java | 6 +++++- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index 6ade9eb8f8e5..ab6fc9475142 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.IndividualBytesFieldCell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.RawCell; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.HeapSize; @@ -1000,25 +999,16 @@ public byte getTypeByte() { @Override public Optional getTag(byte type) { - if (cell instanceof RawCell) { - return ((RawCell) cell).getTag(type); - } return PrivateCellUtil.getTag(cell, type); } @Override public Iterator getTags() { - if (cell instanceof RawCell) { - return ((RawCell) cell).getTags(); - } return PrivateCellUtil.tagsIterator(cell); } @Override public byte[] cloneTags() { - if (cell instanceof RawCell) { - return ((RawCell) cell).cloneTags(); - } return PrivateCellUtil.cloneTags(cell); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java index 85f8b278de47..d29e8ca8bdce 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java @@ -24,8 +24,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * An extended version of cell that gives more power to CPs + * An extended version of Cell that allows CPs manipulate Tags. */ +// Added by HBASE-19092 to expose Tags to CPs (history server) w/o exposing ExtendedCell. +// Why is this in hbase-common and not in hbase-server where it is used? +// RawCell is an odd name for a class that is only for CPs that want to manipulate Tags on +// server-side only w/o exposing ExtendedCell -- super rare, super exotic. 
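// [Editorial note, not part of the committed diff] With the CellWrapper overrides in
// Mutation now delegating straight to PrivateCellUtil (see the removals above), a
// coprocessor reads tags through this interface the same way regardless of the backing
// Cell implementation. A minimal sketch, assuming a hypothetical tag-type byte
// TTL_TAG_TYPE and a hypothetical handler processTtl(...) -- neither is defined by
// this patch:
//
//   RawCell raw = (RawCell) cell;                  // cast is the CP's responsibility
//   java.util.Optional<Tag> ttlTag = raw.getTag(TTL_TAG_TYPE);
//   ttlTag.ifPresent(tag -> processTtl(tag));      // e.g. inspect the tag value bytes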
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) public interface RawCell extends Cell { static final int MAX_TAGS_LENGTH = (2 * Short.MAX_VALUE) + 1; From f89045644b302ea28111d127ed56d71878a7d437 Mon Sep 17 00:00:00 2001 From: Mohammad Arshad Date: Mon, 21 Dec 2020 21:41:22 +0530 Subject: [PATCH 601/769] =?UTF-8?q?HBASE-25371:=20When=20openRegion=20fail?= =?UTF-8?q?s=20during=20initial=20verification(before=E2=80=A6=20(#2785)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: stack --- .../java/org/apache/hadoop/hbase/regionserver/HRegion.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index ed32fd5293a5..493b74b6b9ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -8135,7 +8135,9 @@ protected HRegion openHRegion(final CancelableProgressable reporter) // MetricsRegionWrapperImpl is already init and not close, // add region close when open failed try { - this.close(); + // It is not required to write sequence id file when region open is failed. + // Passing true to skip the sequence id file write. + this.close(true); } catch (Throwable e) { LOG.warn("Open region: {} failed. Try close region but got exception ", this.getRegionInfo(), e); From 40789e8b9bf28b9e19fa3bb13ebc2bbc5571a4ce Mon Sep 17 00:00:00 2001 From: Huang Zhuoyue Date: Tue, 22 Dec 2020 17:28:25 +0800 Subject: [PATCH 602/769] HBASE-25443 Improve the experience of using the Master webpage by change the loading process of snapshot list to asynchronous --- .../hbase/tmpl/master/MasterStatusTmpl.jamon | 35 ----------- .../hbase-webapps/master/userSnapshots.jsp | 58 +++++++++++++++++++ .../resources/hbase-webapps/static/js/tab.js | 6 +- 3 files changed, 63 insertions(+), 36 deletions(-) create mode 100644 hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 602122db4a31..14e82e8b970c 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -270,7 +270,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
    - <& userSnapshots &>
    @@ -657,40 +656,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); -<%def userSnapshots> -<%java> - List snapshots = master.isInitialized() ? - master.getSnapshotManager().getCompletedSnapshots() : null; - -<%if (snapshots != null && snapshots.size() > 0)%> -
    Id Parent
    - - - - - - - - <%for SnapshotDescription snapshotDesc : snapshots%> - <%java> - TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable()); - - - - - - - - - - -

    <% snapshots.size() %> snapshot(s) in set. [Snapshot Storefile stats]

    -
-        <tr><th>Snapshot Name</th><th>Table</th><th>Creation Time</th><th>Owner</th><th>TTL</th></tr>
    <% snapshotDesc.getName() %> <% snapshotTable.getNameAsString() %> - <% new Date(snapshotDesc.getCreationTime()) %><% snapshotDesc.getOwner() %> - <% snapshotDesc.getTtl() == 0 ? "FOREVER": PrettyPrinter.format(String.valueOf(snapshotDesc.getTtl()), PrettyPrinter.Unit.TIME_INTERVAL) %> -
    - - <%def deadRegionServers> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp b/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp new file mode 100644 index 000000000000..0b741e1089fd --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/master/userSnapshots.jsp @@ -0,0 +1,58 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/plain;charset=UTF-8" + import="java.util.List" + import="java.util.Date" + import="org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription" + import="org.apache.hadoop.hbase.master.HMaster" + import="org.apache.hadoop.hbase.TableName" + import="org.apache.hadoop.hbase.util.PrettyPrinter" +%> +<% + HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); + List snapshots = master.isInitialized() ? + master.getSnapshotManager().getCompletedSnapshots() : null; +%> +<%if (snapshots != null && snapshots.size() > 0) { %> + + + + + + + + + <% for (SnapshotDescription snapshotDesc : snapshots){ %> + <% TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable()); %> + + + + + + + + + <% } %> +

    <%= snapshots.size() %> snapshot(s) in set. [Snapshot Storefile stats]

    +
+    <tr><th>Snapshot Name</th><th>Table</th><th>Creation Time</th><th>Owner</th><th>TTL</th></tr>
    <%= snapshotDesc.getName() %> <%= snapshotTable.getNameAsString() %> + <%= new Date(snapshotDesc.getCreationTime()) %><%= snapshotDesc.getOwner() %> + <%= snapshotDesc.getTtl() == 0 ? "FOREVER": PrettyPrinter.format(String.valueOf(snapshotDesc.getTtl()), PrettyPrinter.Unit.TIME_INTERVAL) %> +
    +<% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/static/js/tab.js b/hbase-server/src/main/resources/hbase-webapps/static/js/tab.js index 31438a1b36f9..808882b9f40c 100644 --- a/hbase-server/src/main/resources/hbase-webapps/static/js/tab.js +++ b/hbase-server/src/main/resources/hbase-webapps/static/js/tab.js @@ -26,6 +26,10 @@ $(document).ready( location.hash = $(e.target).attr('href').substr(1).replace(prefix, ""); $(this).tab('show'); }); + + $.ajax({url:"/userSnapshots.jsp", success:function(result){ + $("#tab_userSnapshots").html(result); + }}); if (location.hash !== '') { var tabItem = $('a[href="' + location.hash.replace("#", "#"+prefix) + '"]'); @@ -35,4 +39,4 @@ $(document).ready( } return true; } -); \ No newline at end of file +); From 68aba131fa5fbc61a8f529896b083e3abe9eddf4 Mon Sep 17 00:00:00 2001 From: ramkrish86 Date: Mon, 28 Dec 2020 13:02:06 +0530 Subject: [PATCH 603/769] HBASE-24850 CellComparator perf improvement (#2802) * Using ContiguousCellFormat as a marker alone * Commit the new file * Fix the comparator logic that was an oversight * Fix the sequenceId check order * Adding few more static methods that helps in scan flow like query matcher where we have more cols * Remove ContiguousCellFormat and ensure compare() can be inlined * applying negation as per review comment * Fix checkstyle comments * fix review comments * Address review comments * Fix the checkstyle issues * Fix javadoc Signed-off-by: stack Signed-off-by: AnoopSamJohn Signed-off-by: huaxiangsun --- .../apache/hadoop/hbase/BBKVComparator.java | 173 ------ .../hbase/ByteBufferKeyOnlyKeyValue.java | 12 +- .../hadoop/hbase/CellComparatorImpl.java | 525 ++++++++++++++++-- .../org/apache/hadoop/hbase/CellUtil.java | 50 +- .../org/apache/hadoop/hbase/KeyValue.java | 56 +- .../hadoop/hbase/TestByteBufferKeyValue.java | 2 +- .../regionserver/DataBlockEncodingTool.java | 3 +- 7 files changed, 588 insertions(+), 233 deletions(-) delete mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java deleted file mode 100644 index bc76a9df37e6..000000000000 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/BBKVComparator.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase; - -import java.util.Comparator; - -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.primitives.Longs; - -/** - * A comparator for case where {@link ByteBufferKeyValue} is prevalent type (BBKV - * is base-type in hbase2). Takes a general comparator as fallback in case types are NOT the - * expected ByteBufferKeyValue. - * - *
- * <p>This is a tricked-out Comparator at heart of hbase read and write. It is in
- * the HOT path so we try all sorts of ugly stuff so we can go faster. See below
- * in this javadoc comment for the list.
- *
- * <p>Apply this comparator narrowly so it is fed exclusively ByteBufferKeyValues
- * as much as is possible so JIT can settle (e.g. make one per ConcurrentSkipListMap
- * in HStore).
- *
- * <p>Exploits specially added methods in BBKV to save on deserializations of shorts,
- * longs, etc: i.e. calculating the family length requires row length; pass it in
- * rather than recalculate it, and so on.
- *
- * <p>This comparator does static dispatch to private final methods so hotspot is comfortable
- * deciding inline.
- *
- * <p>Measurement has it that we almost have it so all inlines from memstore
- * ConcurrentSkipListMap on down to the (unsafe) intrinisics that do byte compare
- * and deserialize shorts and ints; needs a bit more work.
- *
- * <p>Does not take a Type to compare: i.e. it is not a Comparator<Cell> or
- * CellComparator<Cell> or Comparator<ByteBufferKeyValue> because that adds
- * another method to the hierarchy -- from compare(Object, Object)
- * to dynamic compare(Cell, Cell) to static private compare -- and inlining doesn't happen if
- * hierarchy is too deep (it is the case here).
- *
- * <p>
    Be careful making changes. Compare perf before and after and look at what - * hotspot ends up generating before committing change (jitwatch is helpful here). - * Changing this one class doubled write throughput (HBASE-20483). - */ -@InterfaceAudience.Private -public class BBKVComparator implements Comparator { - protected static final Logger LOG = LoggerFactory.getLogger(BBKVComparator.class); - private final Comparator fallback; - - public BBKVComparator(Comparator fallback) { - this.fallback = fallback; - } - - @Override - public int compare(Object l, Object r) { - if ((l instanceof ByteBufferKeyValue) && (r instanceof ByteBufferKeyValue)) { - return compare((ByteBufferKeyValue)l, (ByteBufferKeyValue)r, false); - } - // Skip calling compare(Object, Object) and go direct to compare(Cell, Cell) - return this.fallback.compare((Cell)l, (Cell)r); - } - - // TODO: Come back here. We get a few percentage points extra of throughput if this is a - // private method. - static int compare(ByteBufferKeyValue left, ByteBufferKeyValue right, - boolean ignoreSequenceid) { - // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not - // sharing gets us a few percent more throughput in compares. If changes here or there, make - // sure done in both places. - - // Compare Rows. Cache row length. - int leftRowLength = left.getRowLength(); - int rightRowLength = right.getRowLength(); - int diff = ByteBufferUtils.compareTo(left.getRowByteBuffer(), left.getRowPosition(), - leftRowLength, - right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); - if (diff != 0) { - return diff; - } - - // If the column is not specified, the "minimum" key type appears as latest in the sorted - // order, regardless of the timestamp. This is used for specifying the last key/value in a - // given row, because there is no "lexicographically last column" (it would be infinitely long). - // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in that - // we can't do memcmp w/ special rules like this. - // TODO: Is there a test for this behavior? - int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); - int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); - int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, - leftFamilyLength); - - // No need of left row length below here. - - byte leftType = left.getTypeByte(leftKeyLength); - if (leftFamilyLength + leftQualifierLength == 0 && - leftType == KeyValue.Type.Minimum.getCode()) { - // left is "bigger", i.e. it appears later in the sorted order - return 1; - } - - int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); - int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); - int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, - rightFamilyLength); - - // No need of right row length below here. - - byte rightType = right.getTypeByte(rightKeyLength); - if (rightFamilyLength + rightQualifierLength == 0 && - rightType == KeyValue.Type.Minimum.getCode()) { - return -1; - } - - // Compare families. 
- int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); - int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); - diff = ByteBufferUtils.compareTo(left.getFamilyByteBuffer(), leftFamilyPosition, - leftFamilyLength, - right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); - if (diff != 0) { - return diff; - } - - // Compare qualifiers - diff = ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), - left.getQualifierPosition(leftFamilyPosition, leftFamilyLength), leftQualifierLength, - right.getQualifierByteBuffer(), - right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), - rightQualifierLength); - if (diff != 0) { - return diff; - } - - // Timestamps. - // Swap order we pass into compare so we get DESCENDING order. - diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); - if (diff != 0) { - return diff; - } - - // Compare types. Let the delete types sort ahead of puts; i.e. types - // of higher numbers sort before those of lesser numbers. Maximum (255) - // appears ahead of everything, and minimum (0) appears after - // everything. - diff = (0xff & rightType) - (0xff & leftType); - if (diff != 0) { - return diff; - } - - // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return ignoreSequenceid ? diff : Longs.compare(right.getSequenceId(), left.getSequenceId()); - } -} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java index cc7e8d72c3d7..d55733769ddf 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java @@ -161,7 +161,11 @@ private int getTimestampOffset() { @Override public byte getTypeByte() { - return ByteBufferUtils.toByte(this.buf, this.offset + this.length - 1); + return getTypeByte(this.length); + } + + byte getTypeByte(int keyLen) { + return ByteBufferUtils.toByte(this.buf, this.offset + keyLen - 1); } @Override @@ -236,7 +240,11 @@ public int getFamilyPosition() { // The position in BB where the family length is added. private int getFamilyLengthPosition() { - return this.offset + Bytes.SIZEOF_SHORT + getRowLength(); + return getFamilyLengthPosition(getRowLength()); + } + + int getFamilyLengthPosition(int rowLength) { + return this.offset + Bytes.SIZEOF_SHORT + rowLength; } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java index 4af035a94f16..d55f9bad46fe 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java @@ -34,8 +34,7 @@ * format should be taken into consideration, for which the instance of this comparator * should be used. In all other cases the static APIs in this comparator would be enough *
 * <p>HOT methods. We spend a good portion of CPU comparing. Anything that makes the compare
- * faster will likely manifest at the macro level. See also
- * {@link BBKVComparator}. Use it when mostly {@link ByteBufferKeyValue}s.
+ * faster will likely manifest at the macro level.
 * <p>
    */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( @@ -57,29 +56,286 @@ public final int compare(final Cell a, final Cell b) { } @Override - public int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { - + public int compare(final Cell l, final Cell r, boolean ignoreSequenceid) { int diff = 0; // "Peel off" the most common path. - if (a instanceof ByteBufferKeyValue && b instanceof ByteBufferKeyValue) { - diff = BBKVComparator.compare((ByteBufferKeyValue)a, (ByteBufferKeyValue)b, ignoreSequenceid); + if (l instanceof KeyValue && r instanceof KeyValue) { + diff = compareKeyValues((KeyValue) l, (KeyValue) r); + if (diff != 0) { + return diff; + } + } else if (l instanceof KeyValue && r instanceof ByteBufferKeyValue) { + diff = compareKVVsBBKV((KeyValue) l, (ByteBufferKeyValue) r); + if (diff != 0) { + return diff; + } + } else if (l instanceof ByteBufferKeyValue && r instanceof KeyValue) { + diff = compareKVVsBBKV((KeyValue) r, (ByteBufferKeyValue) l); + if (diff != 0) { + // negate- Findbugs will complain? + return -diff; + } + } else if (l instanceof ByteBufferKeyValue && r instanceof ByteBufferKeyValue) { + diff = compareBBKV((ByteBufferKeyValue) l, (ByteBufferKeyValue) r); if (diff != 0) { return diff; } } else { - diff = compareRows(a, b); + int leftRowLength = l.getRowLength(); + int rightRowLength = r.getRowLength(); + diff = compareRows(l, leftRowLength, r, rightRowLength); if (diff != 0) { return diff; } - diff = compareWithoutRow(a, b); + diff = compareWithoutRow(l, r); if (diff != 0) { return diff; } } - // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return ignoreSequenceid? diff: Long.compare(b.getSequenceId(), a.getSequenceId()); + return ignoreSequenceid ? diff : Long.compare(r.getSequenceId(), l.getSequenceId()); + } + + private static int compareKeyValues(final KeyValue left, final KeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = Bytes.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowArray(), right.getRowOffset(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyOffset(rightFamilyLengthPosition); + diff = Bytes.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, + right.getFamilyArray(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = Bytes.compareTo(left.getQualifierArray(), + left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierArray(), right.getQualifierOffset(rightFamilyPosition, rightFamilyLength), + rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + // TODO : Ensure we read the bytes and do the compare instead of the value. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); + } + + private static int compareBBKV(final ByteBufferKeyValue left, final ByteBufferKeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = ByteBufferUtils.compareTo(left.getRowByteBuffer(), left.getRowPosition(), + leftRowLength, right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + diff = ByteBufferUtils.compareTo(left.getFamilyByteBuffer(), leftFamilyPosition, + leftFamilyLength, right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + left.getQualifierPosition(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierByteBuffer(), + right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); + } + + private static int compareKVVsBBKV(final KeyValue left, final ByteBufferKeyValue right) { + int diff; + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + diff = ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + if (diff != 0) { + return diff; + } + + // If the column is not specified, the "minimum" key type appears as latest in the sorted + // order, regardless of the timestamp. This is used for specifying the last key/value in a + // given row, because there is no "lexicographically last column" (it would be infinitely + // long). + // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // that + // we can't do memcmp w/ special rules like this. + // TODO: Is there a test for this behavior? + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + byte leftType = left.getTypeByte(leftKeyLength); + if (leftType == KeyValue.Type.Minimum.getCode() + && leftFamilyLength + leftQualifierLength == 0) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // No need of right row length below here. + + byte rightType = right.getTypeByte(rightKeyLength); + if (rightType == KeyValue.Type.Minimum.getCode() + && rightFamilyLength + rightQualifierLength == 0) { + return -1; + } + + // Compare families. + int leftFamilyPosition = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + diff = ByteBufferUtils.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, + right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); + if (diff != 0) { + return diff; + } + + // Compare qualifiers + diff = ByteBufferUtils.compareTo(left.getQualifierArray(), + left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierByteBuffer(), + right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); + if (diff != 0) { + return diff; + } + + // Timestamps. + // Swap order we pass into compare so we get DESCENDING order. + diff = Long.compare(right.getTimestamp(rightKeyLength), left.getTimestamp(leftKeyLength)); + if (diff != 0) { + return diff; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & rightType) - (0xff & leftType); } /** @@ -94,6 +350,65 @@ public final int compareColumns(final Cell left, final Cell right) { return compareQualifiers(left, right); } + private int compareColumns(final Cell left, final int leftFamLen, final int leftQualLen, + final Cell right, final int rightFamLen, final int rightQualLen) { + int diff = compareFamilies(left, leftFamLen, right, rightFamLen); + if (diff != 0) { + return diff; + } + return compareQualifiers(left, leftQualLen, right, rightQualLen); + } + + private int compareFamilies(Cell left, int leftFamLen, Cell right, int rightFamLen) { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, right.getFamilyArray(), + right.getFamilyOffset(), rightFamLen); + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. 
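    // [Editorial note, not part of the committed diff] The swap works because a
    // consistent comparator guarantees sign(compare(a, b)) == -sign(compare(b, a)),
    // while literally negating the result is unsafe: if compare(b, a) ever returned
    // Integer.MIN_VALUE, then -compare(b, a) overflows back to Integer.MIN_VALUE,
    // which is what FindBugs RV_NEGATING_RESULT_OF_COMPARETO warns about. Hedged
    // illustration of the two spellings, with placeholder arrays a and b:
    //
    //   int risky = -Bytes.compareTo(b, bOff, bLen, a, aOff, aLen); // may overflow
    //   int safe  =  Bytes.compareTo(a, aOff, aLen, b, bOff, bLen); // just swap operands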
+ return ByteBufferUtils.compareTo(left.getFamilyArray(), left.getFamilyOffset(), leftFamLen, + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); + } + return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), leftFamLen, + right.getFamilyArray(), right.getFamilyOffset(), rightFamLen); + } + + private final int compareQualifiers(Cell left, int leftQualLen, Cell right, int rightQualLen) { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, + ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, + right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. + return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + leftQualLen, ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); + } + return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), leftQualLen, + right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); + } + /** * Compare the families of left and right cell * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise @@ -125,38 +440,174 @@ public final int compareFamilies(Cell left, Cell right) { right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } + static int compareQualifiers(KeyValue left, KeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. 
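    // [Editorial note, not part of the committed diff] Despite the "Compare families"
    // comment above, no family bytes are compared in this static compareQualifiers
    // overload; the family offset and length are computed only to locate where the
    // qualifier bytes begin inside the backing array.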
+ int leftFamilyOffset = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyOffset = right.getFamilyOffset(rightFamilyLengthPosition); + + // Compare qualifiers + return Bytes.compareTo(left.getQualifierArray(), leftFamilyOffset + leftFamilyLength, + leftQualifierLength, right.getQualifierArray(), rightFamilyOffset + rightFamilyLength, + rightQualifierLength); + } + + static int compareQualifiers(KeyValue left, ByteBufferKeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. + int leftFamilyOffset = left.getFamilyOffset(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierArray(), + leftFamilyOffset + leftFamilyLength, leftQualifierLength, right.getQualifierByteBuffer(), + rightFamilyPosition + rightFamilyLength, rightQualifierLength); + } + + static int compareQualifiers(ByteBufferKeyValue left, KeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. 
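    // [Editorial note, not part of the committed diff] This overload mirrors the
    // KeyValue/KeyValue variant above. Keeping four concrete overloads over the
    // (KeyValue, ByteBufferKeyValue) type pairs instead of one shared generic helper
    // is deliberate: per the commit message ("ensure compare() can be inlined"), the
    // flat, monomorphic call sites are friendlier to HotSpot inlining.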
+ int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyOffset = right.getFamilyOffset(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierArray(), + rightFamilyOffset + rightFamilyLength, rightQualifierLength); + } + + static int compareQualifiers(ByteBufferKeyValue left, ByteBufferKeyValue right) { + // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or there, make + // sure done in both places. + // Compare Rows. Cache row length. + int leftRowLength = left.getRowLength(); + int rightRowLength = right.getRowLength(); + + int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); + byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); + int leftKeyLength = left.getKeyLength(); + int leftQualifierLength = + left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + + // No need of left row length below here. + + int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); + byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); + int rightKeyLength = right.getKeyLength(); + int rightQualifierLength = + right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + + // Compare families. + int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); + int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); + + // Compare qualifiers + return ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), + leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierByteBuffer(), + rightFamilyPosition + rightFamilyLength, rightQualifierLength); + } + /** * Compare the qualifiers part of the left and right cells. 
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise */ @Override public final int compareQualifiers(Cell left, Cell right) { - if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { - return ByteBufferUtils - .compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) left).getQualifierPosition(), - left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) right).getQualifierPosition(), - right.getQualifierLength()); - } - if (left instanceof ByteBufferExtendedCell) { - return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + if ((left instanceof ByteBufferKeyValue) && (right instanceof ByteBufferKeyValue)) { + return compareQualifiers((ByteBufferKeyValue) left, (ByteBufferKeyValue) right); + } else if ((left instanceof KeyValue) && (right instanceof KeyValue)) { + return compareQualifiers((KeyValue) left, (KeyValue) right); + } else if ((left instanceof KeyValue) && (right instanceof ByteBufferKeyValue)) { + return compareQualifiers((KeyValue) left, (ByteBufferKeyValue) right); + } else if ((left instanceof ByteBufferKeyValue) && (right instanceof KeyValue)) { + return compareQualifiers((ByteBufferKeyValue) left, (KeyValue) right); + } else { + if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), + ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); + } + if (left instanceof ByteBufferExtendedCell) { + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); - } - if (right instanceof ByteBufferExtendedCell) { - // Notice how we flip the order of the compare here. We used to negate the return value but - // see what FindBugs says - // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO - // It suggest flipping the order to get same effect and 'safer'. - return ByteBufferUtils.compareTo(left.getQualifierArray(), - left.getQualifierOffset(), left.getQualifierLength(), - ((ByteBufferExtendedCell)right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell)right).getQualifierPosition(), right.getQualifierLength()); - } - return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + } + if (right instanceof ByteBufferExtendedCell) { + // Notice how we flip the order of the compare here. We used to negate the return value but + // see what FindBugs says + // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO + // It suggest flipping the order to get same effect and 'safer'. 
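      // [Editorial note, not part of the committed diff] This generic
      // ByteBufferExtendedCell/array branch only runs when neither operand matched the
      // four KeyValue/ByteBufferKeyValue fast paths dispatched at the top of the method.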
+ return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), + left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); + } + return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); + } + } /** @@ -195,8 +646,8 @@ static int compareRows(final Cell left, int leftRowLength, final Cell right, int ((ByteBufferExtendedCell)right).getRowByteBuffer(), ((ByteBufferExtendedCell)right).getRowPosition(), rightRowLength); } - return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), - right.getRowArray(), right.getRowOffset(), right.getRowLength()); + return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, + right.getRowArray(), right.getRowOffset(), rightRowLength); } /** @@ -249,10 +700,10 @@ public final int compareWithoutRow(final Cell left, final Cell right) { } if (lFamLength != rFamLength) { // comparing column family is enough. - return compareFamilies(left, right); + return compareFamilies(left, lFamLength, right, rFamLength); } // Compare cf:qualifier - int diff = compareColumns(left, right); + int diff = compareColumns(left, lFamLength, lQualLength, right, rFamLength, rQualLength); if (diff != 0) { return diff; } @@ -282,7 +733,7 @@ public int compareTimestamps(final long ltimestamp, final long rtimestamp) { @Override public Comparator getSimpleComparator() { - return new BBKVComparator(this); + return this; } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index a51fa3de96ef..c3b65e32c11c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -433,6 +433,11 @@ public static boolean matchingRow(final Cell left, final byte[] buf, final int o public static boolean matchingFamily(final Cell left, final Cell right) { byte lfamlength = left.getFamilyLength(); byte rfamlength = right.getFamilyLength(); + return matchingFamily(left, lfamlength, right, rfamlength); + } + + public static boolean matchingFamily(final Cell left, final byte lfamlength, final Cell right, + final byte rfamlength) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), ((ByteBufferExtendedCell) left).getFamilyPosition(), lfamlength, @@ -463,6 +468,11 @@ public static boolean matchingFamily(final Cell left, final byte[] buf) { public static boolean matchingQualifier(final Cell left, final Cell right) { int lqlength = left.getQualifierLength(); int rqlength = right.getQualifierLength(); + return matchingQualifier(left, lqlength, right, rqlength); + } + + private static boolean matchingQualifier(final Cell left, final int lqlength, final Cell right, + final int rqlength) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), ((ByteBufferExtendedCell) left).getQualifierPosition(), lqlength, @@ -516,6 +526,14 @@ public static boolean matchingColumn(final Cell left, final Cell right) { return matchingQualifier(left, right); } + private static boolean matchingColumn(final Cell 
left, final byte lFamLen, final int lQualLength, + final Cell right, final byte rFamLen, final int rQualLength) { + if (!matchingFamily(left, lFamLen, right, rFamLen)) { + return false; + } + return matchingQualifier(left, lQualLength, right, rQualLength); + } + public static boolean matchingValue(final Cell left, final Cell right) { return matchingValue(left, right, left.getValueLength(), right.getValueLength()); } @@ -685,6 +703,11 @@ public static boolean matchingTimestamp(Cell a, Cell b) { public static boolean matchingRows(final Cell left, final Cell right) { short lrowlength = left.getRowLength(); short rrowlength = right.getRowLength(); + return matchingRows(left, lrowlength, right, rrowlength); + } + + public static boolean matchingRows(final Cell left, final short lrowlength, final Cell right, + final short rrowlength) { if (lrowlength != rrowlength) return false; if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getRowByteBuffer(), @@ -713,16 +736,29 @@ public static boolean matchingRows(final Cell left, final Cell right) { * @return True if same row and column. */ public static boolean matchingRowColumn(final Cell left, final Cell right) { - if ((left.getRowLength() + left.getFamilyLength() - + left.getQualifierLength()) != (right.getRowLength() + right.getFamilyLength() - + right.getQualifierLength())) { + short lrowlength = left.getRowLength(); + short rrowlength = right.getRowLength(); + // match length + if (lrowlength != rrowlength) { + return false; + } + + byte lfamlength = left.getFamilyLength(); + byte rfamlength = right.getFamilyLength(); + if (lfamlength != rfamlength) { return false; } - if (!matchingRows(left, right)) { + int lqlength = left.getQualifierLength(); + int rqlength = right.getQualifierLength(); + if (lqlength != rqlength) { + return false; + } + + if (!matchingRows(left, lrowlength, right, rrowlength)) { return false; } - return matchingColumn(left, right); + return matchingColumn(left, lfamlength, lqlength, right, rfamlength, rqlength); } public static boolean matchingRowColumnBytes(final Cell left, final Cell right) { @@ -732,9 +768,9 @@ public static boolean matchingRowColumnBytes(final Cell left, final Cell right) int rfamlength = right.getFamilyLength(); int lqlength = left.getQualifierLength(); int rqlength = right.getQualifierLength(); + // match length - if ((lrowlength + lfamlength + lqlength) != - (rrowlength + rfamlength + rqlength)) { + if ((lrowlength != rrowlength) || (lfamlength != rfamlength) || (lqlength != rqlength)) { return false; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 856480f15066..79356edfea21 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -32,6 +32,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; + import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -1348,14 +1349,14 @@ public byte[] getFamilyArray() { */ @Override public int getFamilyOffset() { - return getFamilyOffset(getRowLength()); + return getFamilyOffset(getFamilyLengthPosition(getRowLength())); } /** * @return Family offset */ - private int getFamilyOffset(int rlength) { - return this.offset + ROW_KEY_OFFSET + rlength + Bytes.SIZEOF_BYTE; 
+ int getFamilyOffset(int familyLenPosition) { + return familyLenPosition + Bytes.SIZEOF_BYTE; } /** @@ -1363,14 +1364,18 @@ private int getFamilyOffset(int rlength) { */ @Override public byte getFamilyLength() { - return getFamilyLength(getFamilyOffset()); + return getFamilyLength(getFamilyLengthPosition(getRowLength())); } /** * @return Family length */ - public byte getFamilyLength(int foffset) { - return this.bytes[foffset-1]; + public byte getFamilyLength(int famLenPos) { + return this.bytes[famLenPos]; + } + + int getFamilyLengthPosition(int rowLength) { + return this.offset + KeyValue.ROW_KEY_OFFSET + rowLength; } /** @@ -1393,7 +1398,14 @@ public int getQualifierOffset() { * @return Qualifier offset */ private int getQualifierOffset(int foffset) { - return foffset + getFamilyLength(foffset); + return getQualifierOffset(foffset, getFamilyLength()); + } + + /** + * @return Qualifier offset + */ + int getQualifierOffset(int foffset, int flength) { + return foffset + flength; } /** @@ -1408,7 +1420,14 @@ public int getQualifierLength() { * @return Qualifier length */ private int getQualifierLength(int rlength, int flength) { - return getKeyLength() - (int) getKeyDataStructureSize(rlength, flength, 0); + return getQualifierLength(getKeyLength(), rlength, flength); + } + + /** + * @return Qualifier length + */ + int getQualifierLength(int keyLength, int rlength, int flength) { + return keyLength - (int) getKeyDataStructureSize(rlength, flength, 0); } /** @@ -1501,7 +1520,11 @@ long getTimestamp(final int keylength) { */ @Override public byte getTypeByte() { - return this.bytes[this.offset + getKeyLength() - 1 + ROW_OFFSET]; + return getTypeByte(getKeyLength()); + } + + byte getTypeByte(int keyLength) { + return this.bytes[this.offset + keyLength - 1 + ROW_OFFSET]; } /** @@ -1875,8 +1898,8 @@ public int compareRows(final Cell left, final Cell right) { * @param rlength * @return 0 if equal, <0 if left smaller, >0 if right smaller */ - public int compareRows(byte [] left, int loffset, int llength, - byte [] right, int roffset, int rlength) { + public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, + int rlength) { return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); } @@ -2449,6 +2472,10 @@ public byte getFamilyLength() { return this.bytes[getFamilyOffset() - 1]; } + int getFamilyLengthPosition(int rowLength) { + return this.offset + Bytes.SIZEOF_SHORT + rowLength; + } + @Override public int getFamilyOffset() { return this.offset + Bytes.SIZEOF_SHORT + getRowLength() + Bytes.SIZEOF_BYTE; @@ -2481,9 +2508,14 @@ public short getRowLength() { @Override public byte getTypeByte() { - return this.bytes[this.offset + getKeyLength() - 1]; + return getTypeByte(getKeyLength()); } + byte getTypeByte(int keyLength) { + return this.bytes[this.offset + keyLength - 1]; + } + + private int getQualifierLength(int rlength, int flength) { return getKeyLength() - (int) getKeyDataStructureSize(rlength, flength, 0); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestByteBufferKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestByteBufferKeyValue.java index 6443d84ebd28..d6c8a75f78d5 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestByteBufferKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestByteBufferKeyValue.java @@ -69,7 +69,7 @@ public void testCompare() { assertTrue(CellComparatorImpl.COMPARATOR.compare(cell1, cell3) < 0); Cell cell4 = getOffheapCell(row1, Bytes.toBytes("f"), 
qual2); assertTrue(CellComparatorImpl.COMPARATOR.compare(cell1, cell4) > 0); - BBKVComparator comparator = new BBKVComparator(null); + CellComparator comparator = CellComparator.getInstance(); assertTrue(comparator.compare(cell1, cell2) < 0); assertTrue(comparator.compare(cell1, cell3) < 0); assertTrue(comparator.compare(cell1, cell4) > 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 0fb183e4909b..a8efa16047da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -221,7 +221,8 @@ public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) int kLen = currentKV.getKeyLength(); int vLen = currentKV.getValueLength(); - int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset()); + int cfOffset = currentKV.getFamilyOffset(); + int cfLen = currentKV.getFamilyLength(); int restLen = currentKV.getLength() - kLen - vLen; totalKeyLength += kLen; From 035b9ecb047f8c3cdc3f3623465a2f4fcf48d3c6 Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Tue, 29 Dec 2020 02:57:30 +0800 Subject: [PATCH 604/769] HBASE-25432:add security checks for setTableStateInMeta and fixMeta (#2809) Signed-off-by: Duo Zhang Signed-off-by: Viraj Jasani --- .../hbase/master/MasterRpcServices.java | 2 ++ .../hbase/security/access/SecureTestUtil.java | 4 +++ .../security/access/TestAccessController.java | 30 +++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index a68aa6650959..a11713276d77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -2527,6 +2527,7 @@ public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest re @Override public GetTableStateResponse setTableStateInMeta(RpcController controller, SetTableStateInMetaRequest request) throws ServiceException { + rpcPreCheck("setTableStateInMeta"); TableName tn = ProtobufUtil.toTableName(request.getTableName()); try { TableState prevState = this.master.getTableStateManager().getTableState(tn); @@ -2732,6 +2733,7 @@ public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProc @Override public FixMetaResponse fixMeta(RpcController controller, FixMetaRequest request) throws ServiceException { + rpcPreCheck("fixMeta"); try { MetaFixer mf = new MetaFixer(this.master); mf.fix(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java index 840c30d80f54..6e0ef5411b17 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException; import org.apache.hadoop.hbase.regionserver.HRegion; import 
org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; @@ -249,6 +250,9 @@ public static void verifyDenied(User user, AccessTestAction... actions) throws E // is buried in the stack trace Throwable ex = e; do { + if (ex instanceof RemoteWithExtrasException) { + ex = ((RemoteWithExtrasException) ex).unwrapRemoteException(); + } if (ex instanceof AccessDeniedException) { isAccessDeniedException = true; break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index d53a84c260df..7ab808cf1823 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Hbck; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Put; @@ -72,6 +73,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -379,6 +381,34 @@ public void testUnauthorizedStopMaster() throws Exception { USER_GROUP_WRITE, USER_GROUP_CREATE); } + @Test + public void testUnauthorizedSetTableStateInMeta() throws Exception { + AccessTestAction action = () -> { + try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()){ + hbck.setTableStateInMeta(new TableState(TEST_TABLE, TableState.State.DISABLED)); + } + return null; + }; + + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); + } + + @Test + public void testUnauthorizedFixMeta() throws Exception { + AccessTestAction action = () -> { + try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()){ + hbck.fixMeta(); + } + return null; + }; + + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); + } + @Test public void testSecurityCapabilities() throws Exception { List<SecurityCapability> capabilities = TEST_UTIL.getConnection().getAdmin() From 928b5ea5d78bb92981f25eea2efe5640db9d1432 Mon Sep 17 00:00:00 2001 From: Pankaj Date: Tue, 29 Dec 2020 22:25:36 +0530 Subject: [PATCH 605/769] HBASE-25379 Make retry pause time configurable for regionserver short operation RPC (reportRegionStateTransition/reportProcedureDone) (#2757) * HBASE-25379 Make retry pause time configurable for regionserver short operation RPC (reportRegionStateTransition/reportProcedureDone) * HBASE-25379 RemoteProcedureResultReporter also should retry after the configured pause time * Addressed the review comments Signed-off-by: Yulin Niu --- .../org/apache/hadoop/hbase/HConstants.java | 11 +++++++++ .../hbase/regionserver/HRegionServer.java | 23 +++++++++++++++++++++----
.../RemoteProcedureResultReporter.java | 7 ++---- 3 files changed, 31 insertions(+), 10 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index e1d3de9d513b..05782fc5518c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -965,6 +965,17 @@ public enum OperationStatusCode { */ public static final int DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT = 10000; + /** + * Retry pause time for short operation RPC + */ + public static final String HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME = + "hbase.rpc.shortoperation.retry.pause.time"; + + /** + * Default value of {@link #HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME} + */ + public static final long DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME = 1000; + /** * Value indicating the server name was saved with no sequence number. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 87d073c81c98..bcb143652203 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -435,6 +435,9 @@ public class HRegionServer extends Thread implements private final int shortOperationTimeout; + // Time to pause if master says 'please hold' + private final long retryPauseTime; + private final RegionServerAccounting regionServerAccounting; private SlowLogTableOpsChore slowLogTableOpsChore = null; @@ -615,6 +618,9 @@ public HRegionServer(final Configuration conf) throws IOException { this.shortOperationTimeout = conf.getInt(HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT); + this.retryPauseTime = conf.getLong(HConstants.HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME, + HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME); + this.abortRequested = new AtomicBoolean(false); this.stopped = false; @@ -2436,10 +2442,8 @@ public boolean reportRegionStateTransition(final RegionStateTransitionContext co final ReportRegionStateTransitionRequest request = createReportRegionStateTransitionRequest(context); - // Time to pause if master says 'please hold'. Make configurable if needed. - final long initPauseTime = 1000; int tries = 0; - long pauseTime; + long pauseTime = this.retryPauseTime; // Keep looping till we get an error. We want to send reports even though server is going down. // Only go down if clusterConnection is null. It is set to null almost as last thing as the // HRegionServer does down. @@ -2470,9 +2474,9 @@ public boolean reportRegionStateTransition(final RegionStateTransitionContext co || ioe instanceof CallQueueTooBigException; if (pause) { // Do backoff else we flood the Master with requests. - pauseTime = ConnectionUtils.getPauseTime(initPauseTime, tries); + pauseTime = ConnectionUtils.getPauseTime(this.retryPauseTime, tries); } else { - pauseTime = initPauseTime; // Reset. + pauseTime = this.retryPauseTime; // Reset. 
} LOG.info("Failed report transition " + TextFormat.shortDebugString(request) + "; retry (#" + tries + ")" + @@ -3938,4 +3942,13 @@ public AsyncClusterConnection getAsyncClusterConnection() { public CompactedHFilesDischarger getCompactedHFilesDischarger() { return compactedFileDischarger; } + + /** + * Return pause time configured in {@link HConstants#HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME}} + * @return pause time + */ + @InterfaceAudience.Private + public long getRetryPauseTime() { + return this.retryPauseTime; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java index 981f090534a3..63e050a710ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java @@ -41,9 +41,6 @@ class RemoteProcedureResultReporter extends Thread { private static final Logger LOG = LoggerFactory.getLogger(RemoteProcedureResultReporter.class); - // Time to pause if master says 'please hold'. Make configurable if needed. - private static final int INIT_PAUSE_TIME_MS = 1000; - private static final int MAX_BATCH = 100; private final HRegionServer server; @@ -98,9 +95,9 @@ public void run() { long pauseTime; if (pause) { // Do backoff else we flood the Master with requests. - pauseTime = ConnectionUtils.getPauseTime(INIT_PAUSE_TIME_MS, tries); + pauseTime = ConnectionUtils.getPauseTime(server.getRetryPauseTime(), tries); } else { - pauseTime = INIT_PAUSE_TIME_MS; // Reset. + pauseTime = server.getRetryPauseTime(); // Reset. } LOG.info("Failed procedure report " + TextFormat.shortDebugString(request) + "; retry (#" + tries + ")" + (pause ? " after " + pauseTime + "ms delay (Master is coming online...)." 
From 7b78b0f0c811dd68734f6a43c46f5a8ace606240 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 30 Dec 2020 09:47:56 +0800 Subject: [PATCH 606/769] HBASE-25452 Use MatcherAssert.assertThat instead of org.junit.Assert.assertThat (#2826) Signed-off-by: Guanghao Zhang --- .../org/apache/hadoop/hbase/ipc/TestFailedServersLog.java | 2 +- .../hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java | 2 +- .../test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java | 2 +- .../java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java | 2 +- .../test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java | 3 +-- .../org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java | 2 +- .../org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java | 3 +-- .../apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java | 2 +- .../org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java | 2 +- .../apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java | 2 +- .../hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java | 2 +- .../org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java | 2 +- .../org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java | 3 +-- .../hbase/hbtop/screen/field/TestFieldScreenPresenter.java | 2 +- .../hbase/hbtop/screen/help/TestHelpScreenPresenter.java | 2 +- .../hbase/hbtop/screen/mode/TestModeScreenPresenter.java | 2 +- .../screen/top/TestFilterDisplayModeScreenPresenter.java | 2 +- .../hbase/hbtop/screen/top/TestInputModeScreenPresenter.java | 2 +- .../hbtop/screen/top/TestMessageModeScreenPresenter.java | 2 +- .../org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java | 2 +- .../hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java | 2 +- .../apache/hadoop/hbase/http/TestSecurityHeadersFilter.java | 3 ++- .../hadoop/hbase/replication/TestReplicationStateBasic.java | 3 +-- .../hbase/replication/TestZKReplicationQueueStorage.java | 3 +-- .../apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java | 3 ++- .../hbase/client/TestAsyncAdminWithRegionReplicas.java | 2 +- .../apache/hadoop/hbase/client/TestAsyncBufferMutator.java | 2 +- .../hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java | 2 +- .../apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java | 5 +++-- .../apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java | 2 +- .../apache/hadoop/hbase/client/TestAsyncRegionLocator.java | 2 +- .../hadoop/hbase/client/TestAsyncReplicationAdminApi.java | 2 +- .../client/TestAsyncSingleRequestRpcRetryingCaller.java | 2 +- .../java/org/apache/hadoop/hbase/client/TestAsyncTable.java | 2 +- .../org/apache/hadoop/hbase/client/TestAsyncTableBatch.java | 2 +- .../hadoop/hbase/client/TestAsyncTableScanException.java | 2 +- .../org/apache/hadoop/hbase/client/TestCheckAndMutate.java | 2 +- .../org/apache/hadoop/hbase/client/TestFromClientSide4.java | 2 +- .../hadoop/hbase/client/TestFromClientSideScanExcpetion.java | 2 +- .../hadoop/hbase/client/TestScannersFromClientSide.java | 2 +- .../apache/hadoop/hbase/client/TestZKConnectionRegistry.java | 2 +- .../hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java | 2 +- .../apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java | 3 +-- .../hbase/regionserver/TestCompactionLifeCycleTracker.java | 2 +- .../hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java | 2 +- .../apache/hadoop/hbase/regionserver/TestSplitLogWorker.java | 2 +- .../hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java | 2 +- .../hadoop/hbase/replication/SyncReplicationTestBase.java | 2 +- .../hadoop/hbase/replication/TestSerialSyncReplication.java | 2 +- 
.../hadoop/hbase/replication/TestSyncReplicationActive.java | 2 +- .../TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java | 2 +- .../replication/TestSyncReplicationRemoveRemoteWAL.java | 2 +- .../hadoop/hbase/replication/TestSyncReplicationStandBy.java | 2 +- .../hbase/security/token/TestGenerateDelegationToken.java | 2 +- .../hadoop/hbase/wal/TestSyncReplicationWALProvider.java | 2 +- .../apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java | 3 ++- 56 files changed, 61 insertions(+), 63 deletions(-) diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java index 4036a51f01c9..fa44022f8d09 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.ipc; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java index ca2829a8065a..ba1e27258d2d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.ipc; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java index da0d917a826e..339cc40847d3 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hbase.hbtop.Record.entry; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.field.Field; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java index 9dec51e0ce8a..2807fd8ef61e 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java @@ -21,7 +21,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import java.util.ArrayList; import java.util.Arrays; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java index 905e4c8fd7a2..c633e37825ea 
100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.text.ParseException; @@ -27,7 +27,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetricsBuilder; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java index beb0ee8075d4..dcbdb6b9b8ab 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.field; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java index 106cfe4af47b..4f0864838532 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.TestUtils; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java index 04fd03d1663d..6c498e94eb1d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java index ed397f6adc66..b705531475f3 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java @@ -19,7 +19,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git 
a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java index ec29fd38f0a1..cbfc7283fc64 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java index 722aa2db03ad..a73d54ea6bb9 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java index 6889639f4584..f718304671c4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java index 92ca7767936e..f094c85f5481 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.hbtop.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.fail; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.TestUtils; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java index 2e2931fd1c17..cbf740430b0a 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.field; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static 
org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java index 0f7b4e3d063e..245bf615e731 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.help; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java index e6c75b5737dc..1b7e12a6240f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.mode; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.never; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java index 99c29c92d131..414b5b0702c5 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java index a5357cc303ed..b5e9bb9f3ba6 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.inOrder; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java 
b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java index d4507597579f..0acd79c56d2d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java index 7cba9f6aef36..e0c09dfe1673 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java index 85b901048954..44a8878407a0 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.hbtop.screen.top; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.when; diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java index 41a1235baaf4..6b9d2c341ed7 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java @@ -22,7 +22,8 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; -import static org.junit.Assert.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; + import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index 348271905fcd..4bb1021b7a42 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -18,16 +18,15 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java index 74a24ac1eb62..4f1fd3908687 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -28,7 +28,6 @@ import java.util.List; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseZKTestingUtility; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java index bf0c69502d52..02611dfaf905 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java @@ -18,9 +18,10 @@ package org.apache.hadoop.hbase.rest; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsEqual.equalTo; -import static org.junit.Assert.assertThat; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.rest.client.Client; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index 3596f1c0025d..c447510cbe4a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.List; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java index 5e7f6cc5a0f7..874a01c8c711 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.client; import static 
org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index b147d9120f64..6404a89671b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -22,10 +22,10 @@ import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; import static org.apache.hadoop.hbase.client.RegionReplicaTestHelper.testLocator; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.Arrays; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index f8f4e076c804..c61a289df23f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -18,12 +18,13 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -42,9 +43,9 @@ import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.RegionStates; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java index 56c1047095bf..c9d47dc65323 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java @@ -19,10 +19,10 @@ import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java index 753e4f38d7ca..03eac06a5710 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java @@ -21,8 +21,8 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT; import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.REGION_COPROCESSOR_CONF_KEY; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java index 3defa80421e5..74b5c2fbd3c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java @@ -20,11 +20,11 @@ import static org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java index 4205012db112..bf8ce01752e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java index 9e6748e34372..c863ec12a96d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java @@ -19,11 +19,11 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static 
org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java index 3fb1a14e2477..4fb050ea287c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java @@ -19,11 +19,11 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java index 96ec86b0bfd3..3dbb1d01e820 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java index c40f2c77f4ad..262b6080538b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java index 4c5985a25b3c..5c6a98d4b913 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; 
import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java index f7f74507f436..a4f79e79667d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index 00b8a64b1153..43d14185864f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -21,12 +21,12 @@ import static org.apache.hadoop.hbase.client.TestFromClientSide3.generateHugeValue; import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 427222f8e40c..ac0e19355894 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.client; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; import java.io.IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java index d6a007797ea9..b631cf96eacb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.FileNotFoundException; diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java index e685556fd639..94186f227521 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java @@ -16,9 +16,9 @@ package org.apache.hadoop.hbase.quotas; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -27,7 +27,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java index 6cd91a711408..b124e288a481 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.regionserver; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index 9acc928756ff..631bc4514489 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.io.InterruptedIOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 2c52bc0c2105..ce5466c57c9a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index 8a82848f3658..9c2340cb8313 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.regionserver.wal; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java index 23753e211054..a8f3442785fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java index 6d9f1322b340..869d9890d11a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.endsWith; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java index 42adab60b5cb..e87655309e74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java index 9a6d242125b7..47f2b2cceb70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java index 9f8982604537..04b5d65318a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.endsWith; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import org.apache.hadoop.fs.FileStatus; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java index 3bfd9a8a4810..0c1c350b2aae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.replication; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java index c05bcd49f101..dae3abc913ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java @@ -19,8 +19,8 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import java.io.IOException; import java.util.Arrays; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java index 8189cef081d4..c40ebf82ea0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
@@ -19,8 +19,8 @@
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
 import java.io.IOException;
 import java.util.Optional;
diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java
index ca661458fe92..a6d9c0f47323 100644
--- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java
+++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.zookeeper;
 import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
@@ -25,7 +26,6 @@
 import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
@@ -36,6 +36,7 @@
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
+
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;

From db1016d43a294fbbb863e2526b6c75a6d3cd2643 Mon Sep 17 00:00:00 2001
From: lujiefsi
Date: Thu, 31 Dec 2020 02:52:26 +0800
Subject: [PATCH 607/769] HBASE-25441 : add security check for some APIs in
 RSRpcServices (#2810)

Signed-off-by: stack
Signed-off-by: Viraj Jasani
---
 .../org/apache/hadoop/hbase/master/HMaster.java  | 12 ++++++++++++
 .../hadoop/hbase/regionserver/RSRpcServices.java |  9 +++++++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a61254f56101..a1e68bf3c9e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2784,6 +2784,18 @@ public boolean isInitialized() {
     return initialized.isReady();
   }
 
+  /**
+   * Report whether this master is started.
+   *
+   * This method is used for testing.
+   *
+   * @return true if master is ready to go, false if not.
+   */
+  @Override
+  public boolean isOnline() {
+    return serviceStarted;
+  }
+
   /**
    * Report whether this master is in maintenance mode.
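
For illustration, the RSRpcServices hunks below all route through one guard.
A minimal sketch of that admin pre-check pattern -- checkOpen() and the
requirePermission(String, Permission.Action) helper are assumed here, and
the real method may differ in detail:

    private void rpcPreCheck(String requestName) throws ServiceException {
      try {
        checkOpen(); // refuse calls while the server is not (yet) serving
        // caller must hold global ADMIN permission for this operation
        requirePermission(requestName, Permission.Action.ADMIN);
      } catch (IOException ioe) {
        // surface the refusal through the protobuf RPC layer
        throw new ServiceException(ioe);
      }
    }

With such a guard in place, operations like stopServer or updateConfiguration
fail for callers without ADMIN permission instead of running for any
authenticated user.
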
* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index f84a6ebbf4aa..78926d6c39d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2345,6 +2345,7 @@ public RollWALWriterResponse rollWALWriter(final RpcController controller, @QosPriority(priority=HConstants.ADMIN_QOS) public StopServerResponse stopServer(final RpcController controller, final StopServerRequest request) throws ServiceException { + rpcPreCheck("stopServer"); requestCount.increment(); String reason = request.getReason(); regionServer.stop(reason); @@ -2354,6 +2355,7 @@ public StopServerResponse stopServer(final RpcController controller, @Override public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, UpdateFavoredNodesRequest request) throws ServiceException { + rpcPreCheck("updateFavoredNodes"); List openInfoList = request.getUpdateInfoList(); UpdateFavoredNodesResponse.Builder respBuilder = UpdateFavoredNodesResponse.newBuilder(); for (UpdateFavoredNodesRequest.RegionUpdateInfo regionUpdateInfo : openInfoList) { @@ -3774,6 +3776,7 @@ public UpdateConfigurationResponse updateConfiguration( RpcController controller, UpdateConfigurationRequest request) throws ServiceException { try { + requirePermission("updateConfiguration", Permission.Action.ADMIN); this.regionServer.updateConfiguration(); } catch (Exception e) { throw new ServiceException(e); @@ -3806,7 +3809,8 @@ public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots( @Override public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, - ClearRegionBlockCacheRequest request) { + ClearRegionBlockCacheRequest request) throws ServiceException { + rpcPreCheck("clearRegionBlockCache"); ClearRegionBlockCacheResponse.Builder builder = ClearRegionBlockCacheResponse.newBuilder(); CacheEvictionStatsBuilder stats = CacheEvictionStats.builder(); @@ -3933,7 +3937,8 @@ private List getSlowLogPayloads(SlowLogResponseRequest request, @Override @QosPriority(priority = HConstants.ADMIN_QOS) public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controller, - final ClearSlowLogResponseRequest request) { + final ClearSlowLogResponseRequest request) throws ServiceException { + rpcPreCheck("clearSlowLogsResponses"); final NamedQueueRecorder namedQueueRecorder = this.regionServer.getNamedQueueRecorder(); boolean slowLogsCleaned = Optional.ofNullable(namedQueueRecorder) From 7e35d5022a6b0d59404164d8364c9df2af68db50 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 31 Dec 2020 16:57:27 +0800 Subject: [PATCH 608/769] HBASE-25451 Upgrade commons-io to 2.8.0 (#2825) Signed-off-by: Guanghao Zhang Signed-off-by: stack --- .../hbase/client/AsyncConnectionImpl.java | 7 +++--- .../client/example/AsyncClientExample.java | 2 +- .../hbase/io/FSDataInputStreamWrapper.java | 8 +++---- .../apache/hadoop/hbase/io/hfile/HFile.java | 3 ++- .../hadoop/hbase/io/hfile/HFileInfo.java | 17 +++++++++---- .../apache/hadoop/hbase/master/HMaster.java | 4 ++-- .../apache/hadoop/hbase/util/HBaseFsck.java | 13 +++++----- .../apache/hadoop/hbase/util/RegionMover.java | 4 ++-- ...tractTestAsyncTableRegionReplicasRead.java | 5 ++-- .../hbase/client/TestAsyncAdminBase.java | 5 ++-- .../hbase/client/TestAsyncAdminBuilder.java | 5 ++-- .../client/TestAsyncClusterAdminApi2.java | 5 ++-- 
.../client/TestAsyncMetaRegionLocator.java | 5 ++-- .../client/TestAsyncNonMetaRegionLocator.java | 5 ++-- ...ncNonMetaRegionLocatorConcurrenyLimit.java | 5 ++-- .../hbase/client/TestAsyncRegionLocator.java | 5 ++-- ...stAsyncSingleRequestRpcRetryingCaller.java | 5 ++-- .../hadoop/hbase/client/TestAsyncTable.java | 5 ++-- ...AsyncTableLocateRegionForDeletedTable.java | 5 ++-- .../client/TestAsyncTableNoncedRetry.java | 5 ++-- .../client/TestAsyncTableScanMetrics.java | 5 ++-- ...talogReplicaLoadBalanceSimpleSelector.java | 6 +++-- .../client/TestMetaRegionLocationCache.java | 5 ++-- .../client/TestZKConnectionRegistry.java | 5 ++-- ...TestMasterOperationsForRegionReplicas.java | 4 ++-- .../TestEndToEndSplitTransaction.java | 24 ++++++------------- .../TestSplitTransactionOnCluster.java | 4 ++-- .../replication/TestReplicationBase.java | 12 ++++++---- .../TestReplicationSyncUpToolBase.java | 13 ++++++---- .../TestSerialReplicationEndpoint.java | 5 ++-- pom.xml | 2 +- 31 files changed, 113 insertions(+), 90 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 83beaf1f40ba..1dbb7e6d211a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -212,9 +212,10 @@ public void close() { if(LOG.isDebugEnabled()){ logCallStack(Thread.currentThread().getStackTrace()); } - IOUtils.closeQuietly(clusterStatusListener); - IOUtils.closeQuietly(rpcClient); - IOUtils.closeQuietly(registry); + IOUtils.closeQuietly(clusterStatusListener, + e -> LOG.warn("failed to close clusterStatusListener", e)); + IOUtils.closeQuietly(rpcClient, e -> LOG.warn("failed to close rpcClient", e)); + IOUtils.closeQuietly(registry, e -> LOG.warn("failed to close registry", e)); if (choreService != null) { choreService.shutdown(); } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java index b773ee89ff57..fdbdbc6244f8 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java @@ -111,7 +111,7 @@ private CompletableFuture closeConn() { CompletableFuture closeFuture = new CompletableFuture<>(); addListener(f, (conn, error) -> { if (error == null) { - IOUtils.closeQuietly(conn); + IOUtils.closeQuietly(conn, e -> LOG.warn("failed to close conn", e)); } closeFuture.complete(null); }); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java index d83a9d9da90a..5bbc525b8459 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java @@ -21,8 +21,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; @@ -33,6 +31,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
org.apache.hbase.thirdparty.com.google.common.io.Closeables; + /** * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, * as well as closing streams. Initialization is not thread-safe, but normal operation is; @@ -289,11 +289,11 @@ public void close() { } updateInputStreamStatistics(this.streamNoFsChecksum); // we do not care about the close exception as it is for reading, no data loss issue. - IOUtils.closeQuietly(streamNoFsChecksum); + Closeables.closeQuietly(streamNoFsChecksum); updateInputStreamStatistics(stream); - IOUtils.closeQuietly(stream); + Closeables.closeQuietly(stream); } public HFileSystem getHfs() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 52b6359d92cd..ed0e84deace6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -505,7 +505,8 @@ public static Reader createReader(ReaderContext context, HFileInfo fileInfo, throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion()); } } catch (Throwable t) { - IOUtils.closeQuietly(context.getInputStreamWrapper()); + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); throw new CorruptHFileException("Problem reading HFile Trailer from file " + context.getFilePath(), t); } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 5d65ff3b3a39..072e5b10628a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -44,7 +44,11 @@ import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair; @@ -62,6 +66,9 @@ */ @InterfaceAudience.Private public class HFileInfo implements SortedMap { + + private static final Logger LOG = LoggerFactory.getLogger(HFileInfo.class); + static final String RESERVED_PREFIX = "hfile."; static final byte[] RESERVED_PREFIX_BYTES = Bytes.toBytes(RESERVED_PREFIX); static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); @@ -344,7 +351,8 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr this.hfileContext = createHFileContext(path, trailer, conf); context.getInputStreamWrapper().unbuffer(); } catch (Throwable t) { - IOUtils.closeQuietly(context.getInputStreamWrapper()); + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); throw new CorruptHFileException("Problem reading HFile Trailer from file " + context.getFilePath(), t); } @@ -382,9 +390,10 @@ public void initMetaAndIndex(HFile.Reader reader) throws IOException { // close the block reader context.getInputStreamWrapper().unbuffer(); } catch (Throwable t) { - IOUtils.closeQuietly(context.getInputStreamWrapper()); - throw new 
CorruptHFileException("Problem reading data index and meta index from file " - + context.getFilePath(), t); + IOUtils.closeQuietly(context.getInputStreamWrapper(), + e -> LOG.warn("failed to close input stream wrapper", e)); + throw new CorruptHFileException( + "Problem reading data index and meta index from file " + context.getFilePath(), t); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index a1e68bf3c9e4..cbe001e91588 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -51,7 +51,6 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import javax.servlet.http.HttpServlet; -import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -221,6 +220,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server; @@ -832,7 +832,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc HBaseFsck.createLockRetryCounterFactory(this.conf).create()); } finally { if (result != null) { - IOUtils.closeQuietly(result.getSecond()); + Closeables.close(result.getSecond(), true); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 4ea4c9e14dfb..28f0d5eb887b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -138,6 +138,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -511,7 +512,7 @@ private void unlockHbck() { RetryCounter retryCounter = lockFileRetryCounterFactory.create(); do { try { - IOUtils.closeQuietly(hbckOutFd); + Closeables.close(hbckOutFd, true); CommonFSUtils.delete(CommonFSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true); LOG.info("Finishing hbck"); return; @@ -564,7 +565,7 @@ public void connect() throws IOException { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { - IOUtils.closeQuietly(HBaseFsck.this); + IOUtils.closeQuietly(HBaseFsck.this, e -> LOG.warn("", e)); cleanupHbckZnode(); unlockHbck(); } @@ -863,9 +864,9 @@ public void close() throws IOException { zkw.close(); zkw = null; } - IOUtils.closeQuietly(admin); - IOUtils.closeQuietly(meta); - IOUtils.closeQuietly(connection); + IOUtils.closeQuietly(admin, e -> LOG.warn("", e)); + IOUtils.closeQuietly(meta, e -> LOG.warn("", e)); + 
IOUtils.closeQuietly(connection, e -> LOG.warn("", e)); } } @@ -3845,7 +3846,7 @@ public HBaseFsck exec(ExecutorService exec, String[] args) setRetCode(code); } } finally { - IOUtils.closeQuietly(this); + IOUtils.closeQuietly(this, e -> LOG.warn("", e)); } return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 2f7d351ff996..08042efda68f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -121,8 +121,8 @@ private RegionMover() { @Override public void close() { - IOUtils.closeQuietly(this.admin); - IOUtils.closeQuietly(this.conn); + IOUtils.closeQuietly(this.admin, e -> LOG.warn("failed to close admin", e)); + IOUtils.closeQuietly(this.conn, e -> LOG.warn("failed to close conn", e)); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java index 65c537aa31bc..0a72d836bbc0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java @@ -28,7 +28,6 @@ import java.util.concurrent.ForkJoinPool; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -46,6 +45,8 @@ import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + public abstract class AbstractTestAsyncTableRegionReplicasRead { protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -152,7 +153,7 @@ protected static void waitUntilAllReplicasHaveRow(byte[] row) throws IOException @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java index 70cffd8cfe10..e895f164cb95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java @@ -26,7 +26,6 @@ import java.util.concurrent.ForkJoinPool; import java.util.function.Supplier; import java.util.regex.Pattern; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.StartMiniClusterOption; @@ -43,6 +42,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + /** * Class to test AsyncAdmin. 
*/ @@ -92,7 +93,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java index f5df30dbba2a..d4d0703c9c2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java @@ -29,7 +29,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -53,6 +52,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @RunWith(Parameterized.class) @Category({ LargeTests.class, ClientTests.class }) public class TestAsyncAdminBuilder { @@ -98,7 +99,7 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java index ab6515321c30..e52a2562a456 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.util.concurrent.TimeUnit; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.HMaster; @@ -37,6 +36,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + /** * Only used to test stopMaster/stopRegionServer/shutdown methods. 
*/ @@ -71,7 +72,7 @@ public void setUp() throws Exception { @After @Override public void tearDown() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index 733787773aa0..150670706425 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.hbase.client.RegionReplicaTestHelper.testLocator; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionLocation; @@ -34,6 +33,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncMetaRegionLocator { @@ -60,7 +61,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(REGISTRY); + Closeables.close(REGISTRY, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index 6404a89671b8..99d98c92e7b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -35,7 +35,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; import java.util.stream.IntStream; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -63,6 +62,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) @RunWith(Parameterized.class) public class TestAsyncNonMetaRegionLocator { @@ -116,7 +117,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java index 88ab3ade2934..6aad76c37a47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java @@ -32,7 +32,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.IntStream; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -55,6 +54,8 
@@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncNonMetaRegionLocatorConcurrenyLimit { @@ -136,7 +137,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java index 03eac06a5710..bdc688e0d9fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java @@ -32,7 +32,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -56,6 +55,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncRegionLocator { @@ -107,7 +108,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java index bf8ce01752e5..58c6a04f1537 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java @@ -29,7 +29,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionLocation; @@ -44,6 +43,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncSingleRequestRpcRetryingCaller { @@ -79,7 +80,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java index c863ec12a96d..f76c923c77bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java @@ -41,7 +41,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import 
java.util.stream.IntStream; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -71,6 +70,8 @@ import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @RunWith(Parameterized.class) @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncTable { @@ -128,7 +129,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); assertTrue(ASYNC_CONN.isClosed()); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java index 6ccd9bc46f1c..ac647c693701 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -34,6 +33,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + /** * Fix an infinite loop in {@link AsyncNonMetaRegionLocator}, see the comments on HBASE-21943 for * more details. 
@@ -69,7 +70,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); assertTrue(ASYNC_CONN.isClosed()); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java index 10b358ff6253..82cc1a8b9616 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java @@ -26,7 +26,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -47,6 +46,8 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncTableNoncedRetry { @@ -114,7 +115,7 @@ public static void setUpBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { - IOUtils.closeQuietly(ASYNC_CONN); + Closeables.close(ASYNC_CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java index 7e9f5d9270ee..f5aa19676909 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java @@ -25,7 +25,6 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.ForkJoinPool; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -45,6 +44,8 @@ import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @RunWith(Parameterized.class) @Category({ MediumTests.class, ClientTests.class }) public class TestAsyncTableScanMetrics { @@ -105,7 +106,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java index 6b14286f99ca..a0b49b91a095 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -21,9 +21,9 @@ import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; + import java.io.IOException; import 
java.util.concurrent.TimeUnit; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -40,6 +40,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestCatalogReplicaLoadBalanceSimpleSelector { @@ -80,7 +82,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index 2bcddc9ea7f2..24e88234048d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -48,6 +47,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({SmallTests.class, MasterTests.class }) public class TestMetaRegionLocationCache { @ClassRule @@ -68,7 +69,7 @@ public static void setUp() throws Exception { @AfterClass public static void cleanUp() throws Exception { - IOUtils.closeQuietly(REGISTRY); + Closeables.close(REGISTRY, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index ac0e19355894..82cf0f9bbc97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -28,7 +28,6 @@ import java.io.IOException; import java.util.concurrent.ExecutionException; import java.util.stream.IntStream; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -48,6 +47,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Category({ MediumTests.class, ClientTests.class }) public class TestZKConnectionRegistry { @@ -69,7 +70,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(REGISTRY); + Closeables.close(REGISTRY, true); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index 01d482c095f5..ad08e3fa5567 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -33,7 +33,6 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.ClientMetaTableAccessor;
@@ -107,7 +106,8 @@ public static void setupBeforeClass() throws Exception {
   }
 
   private static void resetConnections() throws IOException {
-    IOUtils.closeQuietly(ADMIN, CONNECTION);
+    Closeables.close(ADMIN, true);
+    Closeables.close(CONNECTION, true);
     CONNECTION = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
     ADMIN = CONNECTION.getAdmin();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index c01edaa0467e..48ad276af597 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -30,7 +30,6 @@
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.ChoreService;
@@ -306,24 +305,15 @@ void verifyRegionsUsingMetaTableAccessor() throws Exception {
 
   /** verify region boundaries obtained from HTable.getStartEndKeys() */
   void verifyRegionsUsingHTable() throws IOException {
-    Table table = null;
-    try {
-      // HTable.getStartEndKeys()
-      table = connection.getTable(tableName);
-
-      try (RegionLocator rl = connection.getRegionLocator(tableName)) {
-        Pair<byte[][], byte[][]> keys = rl.getStartEndKeys();
-        verifyStartEndKeys(keys);
+    try (RegionLocator rl = connection.getRegionLocator(tableName)) {
+      Pair<byte[][], byte[][]> keys = rl.getStartEndKeys();
+      verifyStartEndKeys(keys);
-        Set<RegionInfo> regions = new TreeSet<>(RegionInfo.COMPARATOR);
-        for (HRegionLocation loc : rl.getAllRegionLocations()) {
-          regions.add(loc.getRegion());
-        }
-        verifyTableRegions(regions);
+      Set<RegionInfo> regions = new TreeSet<>(RegionInfo.COMPARATOR);
+      for (HRegionLocation loc : rl.getAllRegionLocations()) {
+        regions.add(loc.getRegion());
       }
-
-    } finally {
-      IOUtils.closeQuietly(table);
+      verifyTableRegions(regions);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 98e8c9ee2dc3..f11544f6a7ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -38,7 +38,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -116,6 +115,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; @@ -900,7 +900,7 @@ private HMaster abortAndWaitForMaster() throws IOException, InterruptedException HMaster master = cluster.startMaster().getMaster(); cluster.waitForActiveAndReadyMaster(); // reset the connections - IOUtils.closeQuietly(admin); + Closeables.close(admin, true); TESTING_UTIL.invalidateConnection(); admin = TESTING_UTIL.getAdmin(); return master; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 455b27298156..eca0d675cb7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -20,10 +20,10 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -53,8 +53,10 @@ import org.junit.BeforeClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; /** * This class is only a base for other integration-level replication tests. @@ -209,9 +211,9 @@ static void configureClusters(HBaseTestingUtility util1, conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false); } - static void restartSourceCluster(int numSlaves) - throws Exception { - IOUtils.closeQuietly(hbaseAdmin, htable1); + static void restartSourceCluster(int numSlaves) throws Exception { + Closeables.close(hbaseAdmin, true); + Closeables.close(htable1, true); UTIL1.shutdownMiniHBaseCluster(); UTIL1.restartHBaseCluster(numSlaves); // Invalidate the cached connection state. @@ -222,7 +224,7 @@ static void restartSourceCluster(int numSlaves) } static void restartTargetHBaseCluster(int numSlaves) throws Exception { - IOUtils.closeQuietly(htable2); + Closeables.close(htable2, true); UTIL2.restartHBaseCluster(numSlaves); // Invalidate the cached connection state CONF2 = UTIL2.getConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java index ee5276de7ee7..3a45c5bdb9ee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_GLOBAL; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -143,24 +142,28 @@ final void syncUp(HBaseTestingUtility util) throws Exception { // Utilities that manager shutdown / restart of source / sink clusters. They take care of // invalidating stale connections after shutdown / restarts. 
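// The conversion pattern this patch applies everywhere, sketched here for
// reference (the conn variable and LOG field are assumptions):
//
//   // commons-io 2.7+ overload: never throws; the IOException is handed
//   // to the supplied consumer, so the failure still reaches the log
//   IOUtils.closeQuietly(conn, e -> LOG.warn("failed to close conn", e));
//
//   // relocated Guava from hbase-thirdparty: logs and swallows the
//   // IOException when the flag is true, rethrows it when false
//   Closeables.close(conn, true);
//
// Guava's Closeables has no varargs form, which is why multi-argument
// IOUtils.closeQuietly(a, b) calls below become two Closeables.close calls.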
final void shutDownSourceHBaseCluster() throws Exception { - IOUtils.closeQuietly(ht1Source, ht2Source); + Closeables.close(ht1Source, true); + Closeables.close(ht2Source, true); UTIL1.shutdownMiniHBaseCluster(); } final void shutDownTargetHBaseCluster() throws Exception { - IOUtils.closeQuietly(ht1TargetAtPeer1, ht2TargetAtPeer1); + Closeables.close(ht1TargetAtPeer1, true); + Closeables.close(ht2TargetAtPeer1, true); UTIL2.shutdownMiniHBaseCluster(); } final void restartSourceHBaseCluster(int numServers) throws Exception { - IOUtils.closeQuietly(ht1Source, ht2Source); + Closeables.close(ht1Source, true); + Closeables.close(ht2Source, true); UTIL1.restartHBaseCluster(numServers); ht1Source = UTIL1.getConnection().getTable(TN1); ht2Source = UTIL1.getConnection().getTable(TN2); } final void restartTargetHBaseCluster(int numServers) throws Exception { - IOUtils.closeQuietly(ht1TargetAtPeer1, ht2TargetAtPeer1); + Closeables.close(ht1TargetAtPeer1, true); + Closeables.close(ht2TargetAtPeer1, true); UTIL2.restartHBaseCluster(numServers); ht1TargetAtPeer1 = UTIL2.getConnection().getTable(TN1); ht2TargetAtPeer1 = UTIL2.getConnection().getTable(TN2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java index 090129174cca..3ba26f321b03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java @@ -25,8 +25,6 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import java.util.concurrent.LinkedBlockingQueue; - -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -56,6 +54,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ReplicationTests.class, MediumTests.class }) public class TestSerialReplicationEndpoint { @@ -78,7 +77,7 @@ public static void setUp() throws Exception { @AfterClass public static void tearDown() throws Exception { - IOUtils.closeQuietly(CONN); + Closeables.close(CONN, true); UTIL.shutdownMiniCluster(); } diff --git a/pom.xml b/pom.xml index f23d0db549b9..749ab83842bb 100755 --- a/pom.xml +++ b/pom.xml @@ -1611,7 +1611,7 @@ 1.13 1.6 - 2.6 + 2.8.0 3.9 3.6.1 3.4.2 From f88fc25d88cb3754550d1a0ce5865dedaf91ab1b Mon Sep 17 00:00:00 2001 From: leyangyueshan <15891721997@163.com> Date: Thu, 31 Dec 2020 22:37:31 +0800 Subject: [PATCH 609/769] HBASE-25435 Slow metric value can be configured (#2823) * HBASE-25435 Slow metric value can be configured * fix HBASE-25435 * hbase-25435 add blank * fix hbase-25435-2 Co-authored-by: stevenxi --- .../hbase/regionserver/MetricsRegionServer.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 6bbb620e1403..3bd787d10074 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -42,6 +42,7 @@ public class MetricsRegionServer { "hbase.regionserver.enable.table.latencies"; public static final boolean RS_ENABLE_TABLE_METRICS_DEFAULT = true; + public static final String SLOW_METRIC_TIME = "hbase.ipc.slow.metric.time"; private final MetricsRegionServerSource serverSource; private final MetricsRegionServerWrapper regionServerWrapper; private RegionServerTableMetrics tableMetrics; @@ -53,6 +54,8 @@ public class MetricsRegionServer { private Timer bulkLoadTimer; private Meter serverReadQueryMeter; private Meter serverWriteQueryMeter; + protected long slowMetricTime; + protected static final int DEFAULT_SLOW_METRIC_TIME = 1000; // milliseconds public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf, MetricsTable metricsTable) { @@ -68,6 +71,7 @@ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Confi // create and use metrics from the new hbase-metrics based registry. bulkLoadTimer = metricRegistry.timer("Bulkload"); + slowMetricTime = conf.getLong(SLOW_METRIC_TIME, DEFAULT_SLOW_METRIC_TIME); quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class); serverReadQueryMeter = metricRegistry.meter("ServerReadQueryPerSecond"); serverWriteQueryMeter = metricRegistry.meter("ServerWriteQueryPerSecond"); @@ -109,7 +113,7 @@ public void updatePutBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updatePutBatch(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowPut(); } serverSource.updatePutBatch(t); @@ -135,7 +139,7 @@ public void updateDeleteBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateDeleteBatch(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowDelete(); } serverSource.updateDeleteBatch(t); @@ -166,7 +170,7 @@ public void updateGet(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateGet(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowGet(); } serverSource.updateGet(t); @@ -177,7 +181,7 @@ public void updateIncrement(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateIncrement(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowIncrement(); } serverSource.updateIncrement(t); @@ -188,7 +192,7 @@ public void updateAppend(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateAppend(tn, t); } - if (t > 1000) { + if (t > slowMetricTime) { serverSource.incrSlowAppend(); } serverSource.updateAppend(t); From 59498caa77e01a5f6d1f99afe51343b6c0e4c62c Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Thu, 12 Nov 2020 16:52:58 -0500 Subject: [PATCH 610/769] HBASE-25279 Make ZKWatcher ExecutorService launch daemon threads Closes #2651 Signed-off-by: Duo Zhang Signed-off-by: Yulin Niu --- .../main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java index 136134d85dfc..7a9fdd689e21 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java @@ -97,7 +97,7 @@ public class ZKWatcher 
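// What the one-line change below buys, sketched for illustration (the real
// factory also sets an UncaughtExceptionHandler): a thread created with
// setDaemon(true) does not keep the JVM alive, e.g.
//
//   Executors.newSingleThreadExecutor(
//     new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d")
//       .setDaemon(true).build());
//
// so a ZKWatcher whose event-processor executor was never shut down no
// longer pins the process open after everything else has exited.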
 implements Watcher, Abortable, Closeable {
   // It is ok to do it in a single thread because the Zookeeper ClientCnxn already serializes the
   // requests using a single while loop and hence there is no performance degradation.
   private final ExecutorService zkEventProcessor = Executors.newSingleThreadExecutor(
-    new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d")
+    new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d").setDaemon(true)
       .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
 
   private final Configuration conf;

From 4dd003ba47fc1f4dea84f1e28d5a2b89c274c6ce Mon Sep 17 00:00:00 2001
From: lujiefsi
Date: Fri, 1 Jan 2021 14:47:34 +0800
Subject: [PATCH 611/769] HBASE-25456 : add security check for setRegionStateInMeta (#2833)

Signed-off-by: Viraj Jasani
---
 .../hbase/master/MasterRpcServices.java       |  1 +
 .../security/access/TestAccessController.java | 23 +++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index a11713276d77..8f2f0dad4b7f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -2550,6 +2550,7 @@ public GetTableStateResponse setTableStateInMeta(RpcController controller,
   @Override
   public SetRegionStateInMetaResponse setRegionStateInMeta(RpcController controller,
     SetRegionStateInMetaRequest request) throws ServiceException {
+    rpcPreCheck("setRegionStateInMeta");
     SetRegionStateInMetaResponse.Builder builder = SetRegionStateInMetaResponse.newBuilder();
     try {
       for (RegionSpecifierAndState s : request.getStatesList()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 7ab808cf1823..17276173ec70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -31,7 +31,10 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileStatus;
@@ -88,6 +91,7 @@
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
@@ -395,6 +399,25 @@ public void testUnauthorizedSetTableStateInMeta() throws Exception {
       USER_GROUP_WRITE, USER_GROUP_CREATE);
   }
 
+  @Test
+  public void testUnauthorizedSetRegionStateInMeta() throws Exception {
+    Admin admin = TEST_UTIL.getAdmin();
+    final List<RegionInfo> regions = admin.getRegions(TEST_TABLE);
+    RegionInfo closeRegion = regions.get(0);
+    Map<String, RegionState.State> newStates = new HashMap<>();
+    newStates.put(closeRegion.getEncodedName(), RegionState.State.CLOSED);
+    AccessTestAction action = () -> {
+      try(Connection conn =
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()){ + hbck.setRegionStateInMeta(newStates); + } + return null; + }; + + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); + } + @Test public void testUnauthorizedFixMeta() throws Exception { AccessTestAction action = () -> { From 5736fa4d148ee7194620b2add3020a8de1774045 Mon Sep 17 00:00:00 2001 From: GeorryHuang <215175212@qq.com> Date: Sun, 3 Jan 2021 03:22:36 +0800 Subject: [PATCH 612/769] HBASE-24751 Display Task completion time and/or processing duration on Web UI (#2815) Signed-off-by: stack --- .../org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon index f700d3994732..8d090276a807 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon @@ -105,6 +105,7 @@ String parent = ""; Description State Status + Completion Time <%for MonitoredTask task : tasks %> @@ -116,6 +117,7 @@ String parent = ""; <% task.getStatus() %> (since <% StringUtils.formatTimeDiff(now, task.getStatusTime()) %> ago) + <% task.getCompletionTimestamp() < 0 ? task.getState() : new Date(task.getCompletionTimestamp()) %> From 1f8b4d16e93199528574c33d4f33f83b9feb1d0d Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Sat, 2 Jan 2021 11:32:47 -0800 Subject: [PATCH 613/769] =?UTF-8?q?HBASE-25438=20Update=20create-release?= =?UTF-8?q?=20mvn=20in=20Dockerfile;=20its=203.6.0;=20make=20=E2=80=A6=20(?= =?UTF-8?q?#2807)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../create-release/hbase-rm/Dockerfile | 19 ++++++++++++++++--- dev-support/create-release/release-build.sh | 10 ++++++---- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile index 26cb7e51abb3..ac443b64228d 100644 --- a/dev-support/create-release/hbase-rm/Dockerfile +++ b/dev-support/create-release/hbase-rm/Dockerfile @@ -21,6 +21,7 @@ # * Java 8 FROM ubuntu:18.04 + # Install extra needed repos and refresh. # # This is all in a single "RUN" command so that if anything changes, "apt update" is run to fetch @@ -33,7 +34,6 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ libcurl4-openssl-dev='7.58.0-*' \ libxml2-dev='2.9.4+dfsg1-*' \ lsof='4.89+dfsg-*' \ - maven='3.6.0-*' \ openjdk-8-jdk='8u*' \ python-pip='9.0.1-*' \ subversion='1.9.7-*' \ @@ -43,10 +43,23 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -qq -y update \ && update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java \ && pip install \ python-dateutil==2.8.1 + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# Install mvn 3.6.3. 
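+# (Illustrative note on the pinned-checksum pattern below: 'sha512sum -c -'
+# reads "EXPECTED_HASH  FILE" pairs from stdin and exits non-zero on any
+# mismatch, failing this RUN step, so a tampered or truncated mirror download
+# aborts the image build instead of being unpacked. The ARG defaults, e.g.
+# MAVEN_VERSION, can still be overridden at build time via --build-arg.)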
+ARG MAVEN_VERSION=3.6.3 +ARG SHA=c35a1803a6e70a126e80b2b3ae33eed961f83ed74d18fcd16909b2d44d7dada3203f1ffe726c17ef8dcca2dcaa9fca676987befeadc9b9f759967a8cb77181c0 +ARG BASE_URL=https://apache.osuosl.org/maven/maven-3/${MAVEN_VERSION}/binaries +RUN mkdir -p /opt/maven \ + && curl -fsSL -o /tmp/apache-maven.tar.gz ${BASE_URL}/apache-maven-${MAVEN_VERSION}-bin.tar.gz \ + && echo "${SHA} /tmp/apache-maven.tar.gz" | sha512sum -c - \ + && tar -xzf /tmp/apache-maven.tar.gz -C /opt/maven --strip-components=1 \ + && rm -f /tmp/apache-maven.tar.gz \ + && ln -s /opt/maven/bin/mvn /usr/bin/mvn + # Install Apache Yetus ENV YETUS_VERSION 0.12.0 -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -RUN wget -qO- "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ +RUN curl "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ tar xvz -C /opt ENV YETUS_HOME /opt/apache-yetus-${YETUS_VERSION} diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index 44a594fff3d6..12cef1205bfc 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -136,19 +136,21 @@ if [[ "$1" == "tag" ]]; then git config user.name "$GIT_NAME" git config user.email "$GIT_EMAIL" + git config user.signingkey "${GPG_KEY}" # Create release version maven_set_version "$RELEASE_VERSION" + find . -name pom.xml -exec git add {} \; git add RELEASENOTES.md CHANGES.md - git commit -a -m "Preparing ${PROJECT} release $RELEASE_TAG; tagging and updates to CHANGES.md and RELEASENOTES.md" + git commit -s -m "Preparing ${PROJECT} release $RELEASE_TAG; tagging and updates to CHANGES.md and RELEASENOTES.md" log "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" - git tag "$RELEASE_TAG" + git tag -s -m "Via create-release" "$RELEASE_TAG" # Create next version maven_set_version "$NEXT_VERSION" - - git commit -a -m "Preparing development version $NEXT_VERSION" + find . -name pom.xml -exec git add {} \; + git commit -s -m "Preparing development version $NEXT_VERSION" if ! is_dry_run; then # Push changes From 6ef297ae36641116c15a78425df2960975e8771c Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Sat, 2 Jan 2021 11:34:55 -0800 Subject: [PATCH 614/769] HBASE-25437 Update refguide RC section; point at the dev-tools/create-releases/README.txt rather than repeat its content (#2804) M dev-support/create-release/README.txt Remove redundant text. Add some extra help around figuring state of gpg-agent. M dev-support/create-release/do-release.sh Undo my mistaken commit where I undid test of gpg signing if under docker M dev-support/create-release/release-build.sh Handle '-h' M src/main/asciidoc/_chapters/developer.adoc Point to the README.txt under dev-tools/create-release rather than repeat the text in here. Be more insistent about using scripts. --- dev-support/create-release/README.txt | 71 +++--- dev-support/create-release/do-release.sh | 23 +- dev-support/create-release/release-build.sh | 4 + src/main/asciidoc/_chapters/developer.adoc | 248 ++++++-------------- 4 files changed, 118 insertions(+), 228 deletions(-) diff --git a/dev-support/create-release/README.txt b/dev-support/create-release/README.txt index aff562445e12..42959cd9da19 100644 --- a/dev-support/create-release/README.txt +++ b/dev-support/create-release/README.txt @@ -1,18 +1,10 @@ -Creates a HBase release candidate. 
The script will update versions, tag the branch, -build HBase binary packages and documentation, and upload maven artifacts to a staging -repository. There is also a dry run mode where only local builds are performed, and -nothing is uploaded to the ASF repos. - -Run with "-h" for options. For example, running below will do all -steps above using the 'rm' dir under Downloads as workspace: - - $ ./do-release-docker.sh -d ~/Downloads/rm +Creates an HBase release candidate. The scripts in this directory came originally from spark -(https://github.com/apache/spark/tree/master/dev/create-release). They were then -modified to suite the hbase context. These scripts supercedes the old -../make_rc.sh script for making release candidates because what is here is more -comprehensive doing more steps of the RM process as well as running in a +(https://github.com/apache/spark/tree/master/dev/create-release). They were +then modified to suit the hbase context. These scripts supercede the old +_../make_rc.sh_ script for making release candidates because what is here is +more comprehensive doing more steps of the RM process as well as running in a container so the RM build environment can be a constant. It: @@ -24,49 +16,52 @@ It: * Pushes release tgzs to the dev dir in a apache dist. * Pushes to repository.apache.org staging. -The entry point is here, in the do-release-docker.sh script. Requires a local -docker; for example, on mac os x, Docker for Desktop installed and running. +The entry point is the do-release-docker.sh script. It requires a local +docker; for example, on mac os x, a Docker for Desktop installed and running. - $ ./do-release-docker.sh -h +(To run a build w/o invoking docker (not recommended!), use _do_release.sh_.) -To run a build w/o invoking docker (not recommended!), use _do_release.sh_. - -Both scripts will query interactively for needed parameters and passphrases. +The scripts will query interactively for needed parameters and passphrases. For explanation of the parameters, execute: $ release-build.sh --help +The scripts run in dry-run mode by default where only local builds are +performed and nothing is uploaded to the ASF repos. Pass the '-f' flag +to remove dry-run mode. + Before starting the RC build, run a reconciliation of what is in JIRA with what is in the commit log. Make sure they align and that anomalies are -explained up in JIRA. - -See http://hbase.apache.org/book.html#maven.release +explained up in JIRA. See http://hbase.apache.org/book.html#maven.release +for how. Regardless of where your release build will run (locally, locally in docker, on a remote machine, etc) you will need a local gpg-agent with access to your -secret keys. A quick way to tell gpg to clear out state and start a gpg-agent -is via the following command phrase: - - $ gpgconf --kill all && gpg-connect-agent /bye - -Before starting an RC build, make sure your local gpg-agent has configs -to properly handle your credentials, especially if you want to avoid -typing the passphrase to your secret key. - -e.g. if you are going to run and step away, best to increase the TTL -on caching the unlocked secret via ~/.gnupg/gpg-agent.conf +secret keys. Before starting an RC build, make sure your local gpg-agent has +configs to properly handle your credentials, especially if you want to avoid +typing the passphrase to your secret key: e.g. 
if you are going to run +and step away (the RC creation takes ~5 hours), best to increase the TTL on +caching the unlocked secret by setting the following into local your +~/.gnupg/gpg-agent.conf file: # in seconds, e.g. a day default-cache-ttl 86400 max-cache-ttl 86400 +A quick way to tell gpg to clear out state, re-read the gpg-agent.conf file +and start a new gpg-agent is via the following command phrase: + + $ gpgconf --kill all && gpg-connect-agent /bye + +You can verify options took hold with '$ gpg --list-options gpg-agent'. + Similarly, run ssh-agent with your ssh key added if building with docker. Running a build on GCE is easy enough. Here are some notes if of use. -Create an instance. 4CPU/15G/10G disk seems to work well enough. +Create an instance. 4CPU/15G/20G disk seems to work well enough. Once up, run the below to make your machine fit for RC building: -# Presuming debian-compatible OS, do these steps on the VM -# your VM username should be your ASF id, because it will show up in build artifacts. +# Presuming debian-compatible OS, do these steps on the VM. +# Your VM username should be your ASF id, because it will show up in build artifacts. # Follow the docker install guide: https://docs.docker.com/engine/install/debian/ $ sudo apt-get install -y \ apt-transport-https \ @@ -129,7 +124,3 @@ $ git clone https://github.com/apache/hbase.git $ mkdir ~/build $ cd hbase $ ./dev-support/create-release/do-release-docker.sh -d ~/build - -# for building the main repo specifically you can save an extra download by pointing the build -# to the local clone you just made -$ ./dev-support/create-release/do-release-docker.sh -d ~/build -r .git diff --git a/dev-support/create-release/do-release.sh b/dev-support/create-release/do-release.sh index 5566b36c21e2..904d813fc3c6 100755 --- a/dev-support/create-release/do-release.sh +++ b/dev-support/create-release/do-release.sh @@ -88,20 +88,19 @@ if [ "$RUNNING_IN_DOCKER" = "1" ]; then else # Outside docker, need to ask for information about the release. get_release_info - - # Run this stuff when not in docker to check gpg. - gpg_test_file="${TMPDIR}/gpg_test.$$.txt" - echo "Testing gpg signing ${GPG} ${GPG_ARGS[@]} --detach --armor --sign ${gpg_test_file}" - echo "foo" > "${gpg_test_file}" - if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign "${gpg_test_file}" ; then - gpg_agent_help - fi - # In --batch mode we have to be explicit about what we are verifying - if ! "${GPG}" "${GPG_ARGS[@]}" --verify "${gpg_test_file}.asc" "${gpg_test_file}" ; then - gpg_agent_help - fi fi +# Check GPG +gpg_test_file="${TMPDIR}/gpg_test.$$.txt" +echo "Testing gpg signing ${GPG} ${GPG_ARGS[@]} --detach --armor --sign ${gpg_test_file}" +echo "foo" > "${gpg_test_file}" +if ! "${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign "${gpg_test_file}" ; then + gpg_agent_help +fi +# In --batch mode we have to be explicit about what we are verifying +if ! 
"${GPG}" "${GPG_ARGS[@]}" --verify "${gpg_test_file}.asc" "${gpg_test_file}" ; then + gpg_agent_help +fi GPG_TTY="$(tty)" export GPG_TTY diff --git a/dev-support/create-release/release-build.sh b/dev-support/create-release/release-build.sh index 12cef1205bfc..cb13110877f1 100755 --- a/dev-support/create-release/release-build.sh +++ b/dev-support/create-release/release-build.sh @@ -91,6 +91,10 @@ if [ $# -ne 1 ]; then exit_with_usage fi +if [[ "$1" == "-h" ]]; then + exit_with_usage +fi + if [[ "$*" == *"help"* ]]; then exit_with_usage fi diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index a6939920cb45..59d6b71f5fc6 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -625,54 +625,8 @@ upgrade protobuf later, then we don't have to add the profile '-Paarch64' anymor .Building against HBase 1.x [NOTE] ==== -HBase 1.x requires Java 7 to build. -See <> for Java requirements per HBase release. -==== - -[[maven.settings.xml]] -.Example _~/.m2/settings.xml_ File -==== -Publishing to maven requires you sign the artifacts you want to upload. -For the build to sign them for you, you a properly configured _settings.xml_ in your local repository under _.m2_, such as the following. - -[source,xml] ----- - - - - - apache.snapshots.https - YOUR_APACHE_ID - - YOUR_APACHE_PASSWORD - - - - - - apache.releases.https - YOUR_APACHE_ID - - YOUR_APACHE_PASSWORD - - - - - - apache-release - - YOUR_KEYNAME - - YOUR_KEY_PASSWORD - - - - - ----- +See old refguides for how to build HBase 1.x. +The below is for building hbase2. ==== [[maven.release]] @@ -699,30 +653,12 @@ target release will be included in the generated _CHANGES.md/RELEASENOTES.md_ files that ship with the release so make sure JIRA is correct before you begin. After doing the above, you can move to the manufacture of an RC. -Building an RC is involved. We've tried to script it. In the next section -we describe the script. It is followed by a description of the steps -involved which the script automates. - -[[do-release-docker.sh]] -==== Release Candidate Generating Script - -The _dev-support/create-release/do-release-docker.sh_ Release Candidate (RC) -Generating script is maintained in the master branch but can generate RCs -for any 2.x+ branch (The script does not work against branch-1). Check out -and update the master branch when making RCs. -The script builds in a Docker container to ensure we have a consistent -environment building. It will ask you for passwords for apache and for your -gpg signing key so it can sign and commit on your behalf. The passwords -are passed to gpg-agent in the container and purged along with the container -when the build is done. - -[NOTE] -==== -_dev-support/create-release/do-release-docker.sh_ supercedes the previous -_dev-support/make_rc.sh_ script. It is more comprehensive automating all -steps, rather than a portion, building a RC. -==== +Building an RC is involved so we've scripted it. The script builds in a Docker +container to ensure we have a consistent environment building. It will ask you +for passwords for apache and for your gpg signing key so it can sign and commit +on your behalf. The passwords are passed to gpg-agent in the container and +purged along with the container when the build is done. The script will: @@ -736,112 +672,25 @@ The script will: * Pushes to repository.apache.org staging. * Creates vote email template. 
-The RC building script is _dev-support/create-release/do-release-docker.sh_. -Pass _-h_ to _dev-support/create-release/do-release-docker.sh_ to -see available options: - -``` -Usage: do-release-docker.sh [options] - -This script runs the release scripts inside a docker image. - -Options: - - -d [path] required. working directory. output will be written to "output" in here. - -n dry run mode. Checks and local builds, but does not upload anything. - -t [tag] tag for the hbase-rm docker image to use for building (default: "latest"). - -j [path] path to local JDK installation to use building. By default the script will - use openjdk8 installed in the docker image. - -s [step] runs a single step of the process; valid steps are: tag, build, publish. if - none specified, runs tag, then build, and then publish. -``` - -Running the below command will do all steps above using the -'rm' working directory under Downloads as workspace: -``` - $ ./dev-support/create-release/do-release-docker.sh -d ~/Downloads/rm -``` - -The script will ask you a set of questions about the release version -and branch, the version to generate the compatibility report against, -and so on, before it starts executing (If you set the appropriate -environment variables, the script will skip asking you questions -- -which can come in handy if you end up having to re-run the script -multiple times). - -On branch 2.1, a Release Candidate (RC) creation can take many hours -(~8 hours) so run your build on a machine you know will be -around for this swath of time. Start the build inside a _screen_ -or _tmux_ session in case you become disconnected from your -build box. - -The build is made of three stages: tag, build, and -publish. If the script fails, you may resort to 'fixing' the -failure manually and then asking the script to run the -subsequent stage rather than start over. - -When the scripts run, they use the passed working directory. -Under the working directory is an _output_ dir. In here is -where the checkouts go, where we build up the _svn_ directory -to _svn_ commit to _apache/dist/dev_, etc. Each step also -dumps a log file in here: e.g. _tag.log_ for the tagging -step and _build.log_ for building. - -The _tagging_ step will checkout hbase, set the version number -in all the poms – e.g. if branch-2.0 is at 2.0.6-SNAPSHOT -and you are making a 2.0.5 RC, it will set the versions in -all poms to 2.0.5 – appropriately. It then generate CHANGES.md -and RELEASENOTES.md by checking out yetus and then -calling its generator scripts. It then commits the poms with -their new versions along with the changed CHANGES.md and -RELEASENOTES.md, tags, and pushes up all changes to the -apache repo. - -The _build_ step will checkout hbase, build all including -javadoc and doc (javadoc takes the bulk of the time – 4 hours plus), -run assemblies to produce src and bin tarballs, sign and hash it -all, and then make a dir under apache dist dev named for the RC. -It will copy all artifacts in here including top-level CHANGES.md -and RELEASENOTES.md. It will generate api diff docs and put them -into this RC dir too. When done, it commits the svn RC. - -The publish step will checkout hbase, build, and then copy up all -artifacts to repository.apache.org (signed and hashed). When done, -it will dump out an email template with all the correct links in place. - -Check the artifacts pushed to the dev distribution directory and up -in repository.apache.org. If all looks good, check the generated -email and send to the dev list. 
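[Editor's aside, not part of the patch: the staged design described in the removed
text above means a failed RC run can be resumed rather than restarted, via the `-s`
option from the usage listing. A hypothetical invocation, reusing the working
directory from the earlier example:]

```
# Resume from the 'build' stage after manually fixing the failure (illustrative only).
$ ./dev-support/create-release/do-release-docker.sh -d ~/Downloads/rm -s build
```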
-
-Under the create-release dir, scripts should make some sense:
-```
-do-release-docker.sh # Main entrance.
-do-release.sh . # More checks. Not usable really other than by setting env variables before running it.
-release-tag.sh # Does tagging steps.
-release-build.sh . # Does the build and publish step.
-release-util.sh # Utility used by all of the above.
-vote.tmpl # Template for email to send out.
-hbase-rm # Has docker image we use.
-```
-
-If the RC fails, the script will do the right thing when it comes
-to edit of the _CHANGES.md_ and _RELEASENOTES.md_ removing the old
-and updating the files with the updated content (No harm verifying
-though).
-
-One trick for checking stuff especially in utility is to do as follows:
-
-```
-$ source release-util.sh ; generate_api_report ../../ rel/2.1.3 2.14RC1
-```
-
-i.e. source the release-util.sh script and then run one of its functions
-passing args. Helped debugging stuff.
-
-[[rc_procedure]]
+The _dev-support/create-release/do-release-docker.sh_ Release Candidate (RC)
+Generating script is maintained in the master branch but can generate RCs
+for any 2.x+ branch (The script does not work against branch-1). Check out
+and update the master branch when making RCs. See
+_dev-support/create-release/README.txt_ for how to configure your
+environment and run the script.
+
+[NOTE]
+====
+_dev-support/create-release/do-release-docker.sh_ supercedes the previous
+_dev-support/make_rc.sh_ script. It is more comprehensive, automating all
+steps of building an RC rather than just a portion.
+====
+
 ==== Release Candidate Procedure

 Here we outline the steps involved in generating a Release Candidate, the steps
 automated by the _dev-support/create-release/do-release-docker.sh_ script
 described in the previous section. Running these steps manually tends to
 be error-prone so is not recommended. The below is informational only.

 The process below makes use of various tools, mainly _git_ and _maven_.

 [NOTE]
 ====
 MAVEN_OPTS="-Xmx4g -XX:MaxPermSize=256m" mvn package

 You could also set this in an environment variable or alias in your shell.
 ====

+[[maven.settings.xml]]
+.Example _~/.m2/settings.xml_ File
+====
+Publishing to maven requires you sign the artifacts you want to upload.
+For the build to sign them for you, you need a properly configured _settings.xml_
+in your local repository under _.m2_, such as the following.
+
+[source,xml]
+----
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+                      http://maven.apache.org/xsd/settings-1.0.0.xsd">
+  <servers>
+    <server>
+      <id>apache.snapshots.https</id>
+      <username>YOUR_APACHE_ID</username>
+      <password>YOUR_APACHE_PASSWORD</password>
+    </server>
+    <server>
+      <id>apache.releases.https</id>
+      <username>YOUR_APACHE_ID</username>
+      <password>YOUR_APACHE_PASSWORD</password>
+    </server>
+  </servers>
+  <profiles>
+    <profile>
+      <id>apache-release</id>
+      <properties>
+        <gpg.keyname>YOUR_KEYNAME</gpg.keyname>
+        <gpg.passphrase>YOUR_KEY_PASSWORD</gpg.passphrase>
+      </properties>
+    </profile>
+  </profiles>
+</settings>
+----
+====
+
 ===== Update the _CHANGES.md_ and _RELEASENOTES.md_ files and the POM files.

 Update _CHANGES.md_ with the changes since the last release.
Be careful with where you put

From 9e5e9564e27407e25932991b6cbca57bd0a7e60f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=94=B3=E8=83=9C=E5=88=A9?= <48829688+shenshengli@users.noreply.github.com>
Date: Mon, 4 Jan 2021 01:27:08 +0800
Subject: [PATCH 615/769] Shenshengli hbase-25450 The parameter
 "hbase.bucketcache.size" is misdescribed (#2821)

Signed-off-by: Anoop Sam John
Signed-off-by: stack
---
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java    | 4 +---
 hbase-common/src/main/resources/hbase-default.xml            | 4 +---
 .../java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java | 2 +-
 src/main/asciidoc/_chapters/hbase-default.adoc               | 1 +
 4 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 05782fc5518c..9a6912a49ea9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1400,9 +1400,7 @@ public enum OperationStatusCode {
   public static final String BUCKET_CACHE_IOENGINE_KEY = "hbase.bucketcache.ioengine";

   /**
-   * When using bucket cache, this is a float that EITHER represents a percentage of total heap
-   * memory size to give to the cache (if < 1.0) OR, it is the capacity in
-   * megabytes of the cache.
+   * When using bucket cache, it is the capacity in megabytes of the cache.
    */
   public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size";

diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index def502a62cfc..9092dd147198 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1033,9 +1033,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>hbase.bucketcache.size</name>
-    <description>A float that EITHER represents a percentage of total heap memory
-      size to give to the cache (if < 1.0) OR, it is the total capacity in
-      megabytes of BucketCache. Default: 0.0</description>
+    <description>It is the total capacity in megabytes of BucketCache. Default: 0.0</description>
   </property>
   <property>
     <name>hbase.bucketcache.bucket.sizes</name>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
index 471eb469b7e5..910498040e07 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
@@ -234,7 +234,7 @@ public static long getOnHeapCacheSize(final Configuration conf) {
   }

   /**
-   * @param conf used to read config for bucket cache size. (< 1 is treated as % and > is treated as MiB)
+   * @param conf used to read config for bucket cache size.
    * @return the number of bytes to use for bucket cache, negative if disabled.
    */
   public static long getBucketCacheSize(final Configuration conf) {
diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc
index 8cbc2dc4f95e..32dfb1650916 100644
--- a/src/main/asciidoc/_chapters/hbase-default.adoc
+++ b/src/main/asciidoc/_chapters/hbase-default.adoc
@@ -1242,6 +1242,7 @@ Whether or not the bucketcache is used in league with the LRU
 .Description
 Used along with bucket cache, this is a float that EITHER represents a percentage of total heap memory
 size to give to the cache (if < 1.0) OR, it is the capacity in megabytes of the cache.
+ (After HBase-2.0, "hbase.bucketcache.size" cannot be between 0-1) + .Default `0` when specified as a float From 4f69cff70144e0ecc26cc7d4f7543dfde3d7258c Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Mon, 4 Jan 2021 01:34:00 +0800 Subject: [PATCH 616/769] HBASE-25439 Add BYTE unit in PrettyPrinter.Unit (#2812) Signed-off-by: stack --- .../hbase/client/TableDescriptorBuilder.java | 37 +++++- .../client/TestTableDescriptorBuilder.java | 64 ++++++++- .../org/apache/hadoop/hbase/HConstants.java | 8 ++ .../hadoop/hbase/util/PrettyPrinter.java | 122 ++++++++++++++++++ hbase-shell/src/main/ruby/hbase/admin.rb | 4 +- 5 files changed, 230 insertions(+), 5 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index d2cfff59f31f..fd466654ea4e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -41,8 +41,10 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.PrettyPrinter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -246,6 +248,15 @@ public class TableDescriptorBuilder { RESERVED_KEYWORDS.add(IS_META_KEY); } + public static PrettyPrinter.Unit getUnit(String key) { + switch (key) { + case MAX_FILESIZE: + return PrettyPrinter.Unit.BYTE; + default: + return PrettyPrinter.Unit.NONE; + } + } + /** * @deprecated namespace table has been folded into the ns family in meta table, do not use this * any more. @@ -458,11 +469,22 @@ public TableDescriptorBuilder setMaxFileSize(long maxFileSize) { return this; } + public TableDescriptorBuilder setMaxFileSize(String maxFileSize) throws HBaseException { + desc.setMaxFileSize(maxFileSize); + return this; + } + public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) { desc.setMemStoreFlushSize(memstoreFlushSize); return this; } + public TableDescriptorBuilder setMemStoreFlushSize(String memStoreFlushSize) + throws HBaseException { + desc.setMemStoreFlushSize(memStoreFlushSize); + return this; + } + public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) { desc.setNormalizerTargetRegionCount(regionCount); return this; @@ -982,6 +1004,11 @@ public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) { return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize)); } + public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException { + return setMaxFileSize(Long.parseLong(PrettyPrinter. + valueOf(maxFileSize, PrettyPrinter.Unit.BYTE))); + } + /** * Returns the size of the memstore after which a flush to filesystem is * triggered. 
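[Editor's aside, not part of the patch: both String overloads above delegate to
PrettyPrinter.valueOf(..., Unit.BYTE), which returns the resolved byte count as a
String and parses sizes using binary units. A minimal sketch of the conversion,
with the arithmetic behind the expected values asserted in the tests further down:]

```java
// Sketch only; PrettyPrinter.valueOf and Unit.BYTE are as introduced in this patch,
// and valueOf declares `throws HBaseException` for malformed inputs.
long maxFileSize = Long.parseLong(
    PrettyPrinter.valueOf("10GB 25MB", PrettyPrinter.Unit.BYTE));
// 10 * 1024^3 + 25 * 1024^2 = 10737418240 + 26214400 = 10763632640 bytes
```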
@@ -1007,6 +1034,12 @@ public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) { return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize)); } + public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize) + throws HBaseException { + return setMemStoreFlushSize(Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, + PrettyPrinter.Unit.BYTE))); + } + /** * This sets the class associated with the flush policy which determines * determines the stores need to be flushed when flushing a region. The @@ -1169,7 +1202,7 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForAttr = true; s.append(key); s.append(" => "); - s.append('\'').append(value).append('\''); + s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); } if (!userKeys.isEmpty()) { @@ -1189,7 +1222,7 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForCfg = true; s.append('\'').append(key).append('\''); s.append(" => "); - s.append('\'').append(value).append('\''); + s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); } s.append("}"); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index c29c83502edd..425d59022ab0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -218,6 +219,33 @@ public void testGetMaxFileSize() { assertEquals(1111L, desc.getMaxFileSize()); } + @Test + public void testSetMaxFileSize() throws HBaseException { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + + String maxFileSize = "1073741824"; + builder.setMaxFileSize(maxFileSize); + assertEquals(1073741824, builder.build().getMaxFileSize()); + + maxFileSize = "1GB"; + builder.setMaxFileSize(maxFileSize); + assertEquals(1073741824, builder.build().getMaxFileSize()); + + maxFileSize = "10GB 25MB"; + builder.setMaxFileSize(maxFileSize); + assertEquals(10763632640L, builder.build().getMaxFileSize()); + + // ignore case + maxFileSize = "10GB 512mb 512KB 512b"; + builder.setMaxFileSize(maxFileSize); + assertEquals(11274813952L, builder.build().getMaxFileSize()); + + maxFileSize = "10737942528 B (10GB 512KB)"; + builder.setMaxFileSize(maxFileSize); + assertEquals(10737942528L, builder.build().getMaxFileSize()); + } + /** * Test default value handling for memStoreFlushSize */ @@ -231,6 +259,33 @@ public void testGetMemStoreFlushSize() { assertEquals(1111L, desc.getMemStoreFlushSize()); } + @Test + public void testSetMemStoreFlushSize() throws HBaseException { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + + String memstoreFlushSize = "1073741824"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(1073741824, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "1GB"; 
+ builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(1073741824, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "10GB 25MB"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(10763632640L, builder.build().getMemStoreFlushSize()); + + // ignore case + memstoreFlushSize = "10GB 512mb 512KB 512b"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(11274813952L, builder.build().getMemStoreFlushSize()); + + memstoreFlushSize = "10737942528 B (10GB 512KB)"; + builder.setMemStoreFlushSize(memstoreFlushSize); + assertEquals(10737942528L, builder.build().getMemStoreFlushSize()); + } + @Test public void testClassMethodsAreBuilderStyle() { BuilderStyleTest.assertClassesAreBuilderStyle(TableDescriptorBuilder.class); @@ -281,7 +336,7 @@ public void testPriority() { } @Test - public void testStringCustomizedValues() { + public void testStringCustomizedValues() throws HBaseException { byte[] familyName = Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(1000).build(); @@ -292,6 +347,13 @@ public void testStringCustomizedValues() { "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", htd.toStringCustomizedValues()); + + htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528").build(); + assertEquals( + "'testStringCustomizedValues', " + + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + + "MAX_FILESIZE => '10737942528 B (10GB 512KB)'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", + htd.toStringCustomizedValues()); } @Test diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 9a6912a49ea9..d31cadd85299 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -700,6 +700,14 @@ public enum OperationStatusCode { public static final int HOUR_IN_SECONDS = 60 * 60; public static final int MINUTE_IN_SECONDS = 60; + /** + * KB, MB, GB, TB equivalent to how many bytes + */ + public static final long KB_IN_BYTES = 1024; + public static final long MB_IN_BYTES = 1024 * KB_IN_BYTES; + public static final long GB_IN_BYTES = 1024 * MB_IN_BYTES; + public static final long TB_IN_BYTES = 1024 * GB_IN_BYTES; + //TODO: although the following are referenced widely to format strings for // the shell. They really aren't a part of the public API. 
It would be // nice if we could put them somewhere where they did not need to be diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java index ff7064b11430..83eb01a635fd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java @@ -41,11 +41,17 @@ public final class PrettyPrinter { "((\\d+)\\s*MINUTES?)?\\s*((\\d+)\\s*SECONDS?)?\\s*\\)?"; private static final Pattern INTERVAL_PATTERN = Pattern.compile(INTERVAL_REGEX, Pattern.CASE_INSENSITIVE); + private static final String SIZE_REGEX = "((\\d+)\\s*B?\\s*\\()?\\s*" + + "((\\d+)\\s*TB?)?\\s*((\\d+)\\s*GB?)?\\s*" + + "((\\d+)\\s*MB?)?\\s*((\\d+)\\s*KB?)?\\s*((\\d+)\\s*B?)?\\s*\\)?"; + private static final Pattern SIZE_PATTERN = Pattern.compile(SIZE_REGEX, + Pattern.CASE_INSENSITIVE); public enum Unit { TIME_INTERVAL, LONG, BOOLEAN, + BYTE, NONE } @@ -63,6 +69,9 @@ public static String format(final String value, final Unit unit) { byte[] booleanBytes = Bytes.toBytesBinary(value); human.append(String.valueOf(Bytes.toBoolean(booleanBytes))); break; + case BYTE: + human.append(humanReadableByte(Long.parseLong(value))); + break; default: human.append(value); } @@ -82,6 +91,9 @@ public static String valueOf(final String pretty, final Unit unit) throws HBaseE case TIME_INTERVAL: value.append(humanReadableIntervalToSec(pretty)); break; + case BYTE: + value.append(humanReadableSizeToBytes(pretty)); + break; default: value.append(pretty); } @@ -191,6 +203,116 @@ private static long humanReadableIntervalToSec(final String humanReadableInterva return ttl; } + /** + * Convert a long size to a human readable string. + * Example: 10763632640 -> 10763632640 B (10GB 25MB) + * @param size the size in bytes + * @return human readable string + */ + private static String humanReadableByte(final long size) { + StringBuilder sb = new StringBuilder(); + long tb, gb, mb, kb, b; + + if (size < HConstants.KB_IN_BYTES) { + sb.append(size); + sb.append(" B"); + return sb.toString(); + } + + tb = size / HConstants.TB_IN_BYTES; + gb = (size - HConstants.TB_IN_BYTES * tb) / HConstants.GB_IN_BYTES; + mb = (size - HConstants.TB_IN_BYTES * tb + - HConstants.GB_IN_BYTES * gb) / HConstants.MB_IN_BYTES; + kb = (size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb + - HConstants.MB_IN_BYTES * mb) / HConstants.KB_IN_BYTES; + b = (size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb + - HConstants.MB_IN_BYTES * mb - HConstants.KB_IN_BYTES * kb); + + sb.append(size).append(" B ("); + if (tb > 0) { + sb.append(tb); + sb.append("TB"); + } + + if (gb > 0) { + sb.append(tb > 0 ? " " : ""); + sb.append(gb); + sb.append("GB"); + } + + if (mb > 0) { + sb.append(tb + gb > 0 ? " " : ""); + sb.append(mb); + sb.append("MB"); + } + + if (kb > 0) { + sb.append(tb + gb + mb > 0 ? " " : ""); + sb.append(kb); + sb.append("KB"); + } + + if (b > 0) { + sb.append(tb + gb + mb + kb > 0 ? " " : ""); + sb.append(b); + sb.append("B"); + } + + sb.append(")"); + return sb.toString(); + } + + /** + * Convert a human readable size to bytes. + * Examples of the human readable size are: 50 GB 20 MB 1 KB , 25000 B etc. + * The units of size specified can be in uppercase as well as lowercase. Also, if a + * single number is specified without any time unit, it is assumed to be in bytes. 
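+   * (Editor's annotation, not part of the original patch: when the string carries
+   * both a leading byte count and a unit breakdown, the two must agree -- e.g.
+   * "10737942528 B (10GB 512KB)" resolves the units to 10*1024^3 + 512*1024
+   * = 10737942528, matching the leading figure; a mismatch raises the
+   * HBaseException thrown at the end of this method.)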
+ * @param humanReadableSize human readable size + * @return value in bytes + * @throws HBaseException + */ + private static long humanReadableSizeToBytes(final String humanReadableSize) + throws HBaseException { + if (humanReadableSize == null) { + return -1; + } + + try { + return Long.parseLong(humanReadableSize); + } catch(NumberFormatException ex) { + LOG.debug("Given size value is not a number, parsing for human readable format"); + } + + String tb = null; + String gb = null; + String mb = null; + String kb = null; + String b = null; + String expectedSize = null; + long size = 0; + + Matcher matcher = PrettyPrinter.SIZE_PATTERN.matcher(humanReadableSize); + if (matcher.matches()) { + expectedSize = matcher.group(2); + tb = matcher.group(4); + gb = matcher.group(6); + mb = matcher.group(8); + kb = matcher.group(10); + b = matcher.group(12); + } + size += tb != null ? Long.parseLong(tb)*HConstants.TB_IN_BYTES:0; + size += gb != null ? Long.parseLong(gb)*HConstants.GB_IN_BYTES:0; + size += mb != null ? Long.parseLong(mb)*HConstants.MB_IN_BYTES:0; + size += kb != null ? Long.parseLong(kb)*HConstants.KB_IN_BYTES:0; + size += b != null ? Long.parseLong(b):0; + + if (expectedSize != null && Long.parseLong(expectedSize) != size) { + throw new HBaseException("Malformed size string: values in byte and human readable" + + "format do not match"); + } + return size; + } + /** * Pretty prints a collection of any type to a string. Relies on toString() implementation of the * object type. diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index ee54ae7af2b2..6228ad78486d 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1472,7 +1472,7 @@ def list_locks # Parse arguments and update TableDescriptorBuilder accordingly # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity def update_tdb_from_arg(tdb, arg) - tdb.setMaxFileSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MAX_FILESIZE))) if arg.include?(TableDescriptorBuilder::MAX_FILESIZE) + tdb.setMaxFileSize(arg.delete(TableDescriptorBuilder::MAX_FILESIZE)) if arg.include?(TableDescriptorBuilder::MAX_FILESIZE) tdb.setReadOnly(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::READONLY))) if arg.include?(TableDescriptorBuilder::READONLY) tdb.setCompactionEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::COMPACTION_ENABLED))) if arg.include?(TableDescriptorBuilder::COMPACTION_ENABLED) tdb.setSplitEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::SPLIT_ENABLED))) if arg.include?(TableDescriptorBuilder::SPLIT_ENABLED) @@ -1480,7 +1480,7 @@ def update_tdb_from_arg(tdb, arg) tdb.setNormalizationEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZATION_ENABLED))) if arg.include?(TableDescriptorBuilder::NORMALIZATION_ENABLED) tdb.setNormalizerTargetRegionCount(JInteger.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_COUNT))) if arg.include?(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_COUNT) tdb.setNormalizerTargetRegionSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_SIZE))) if arg.include?(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_SIZE) - tdb.setMemStoreFlushSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE))) if arg.include?(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE) + tdb.setMemStoreFlushSize(arg.delete(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE)) if 
arg.include?(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE)
     tdb.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(TableDescriptorBuilder::DURABILITY))) if arg.include?(TableDescriptorBuilder::DURABILITY)
     tdb.setPriority(JInteger.valueOf(arg.delete(TableDescriptorBuilder::PRIORITY))) if arg.include?(TableDescriptorBuilder::PRIORITY)
     tdb.setFlushPolicyClassName(arg.delete(TableDescriptorBuilder::FLUSH_POLICY)) if arg.include?(TableDescriptorBuilder::FLUSH_POLICY)

From 5c4effdb81a9ebb3acd5b82c577bd24da858bb23 Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Mon, 4 Jan 2021 23:30:32 +0800
Subject: [PATCH 617/769] =?UTF-8?q?HBASE-25457=20Possible=20race=20in=20As?=
 =?UTF-8?q?yncConnectionImpl=20between=20getChoreServ=E2=80=A6=20(#2839)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Viraj Jasani
---
 .../hadoop/hbase/client/AsyncConnectionImpl.java   | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 1dbb7e6d211a..8a1ac5aac76d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -28,8 +28,6 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

 import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
@@ -56,11 +54,11 @@ import org.apache.hadoop.hbase.util.ConcurrentMapUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;

 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
@@ -187,6 +185,9 @@ private void spawnRenewalChore(final UserGroupInformation user) {
    * @return ChoreService
    */
   synchronized ChoreService getChoreService() {
+    if (isClosed()) {
+      throw new IllegalStateException("connection is already closed");
+    }
     if (choreService == null) {
       choreService = new ChoreService("AsyncConn Chore Service");
     }
@@ -216,8 +217,11 @@ public void close() {
       e -> LOG.warn("failed to close clusterStatusListener", e));
     IOUtils.closeQuietly(rpcClient, e -> LOG.warn("failed to close rpcClient", e));
     IOUtils.closeQuietly(registry, e -> LOG.warn("failed to close registry", e));
-    if (choreService != null) {
-      choreService.shutdown();
+    synchronized (this) {
+      if (choreService != null) {
+        choreService.shutdown();
+        choreService = null;
+      }
     }
     metrics.ifPresent(MetricsConnection::shutdown);
     ConnectionOverAsyncConnection c = this.conn;

From 8198caa8c64c1a8bd2c1a61de00f98f8a74af952 Mon Sep 17 00:00:00 2001
From: Bo Cui
Date: Mon, 4 Jan 2021 23:34:38 +0800
Subject: [PATCH 618/769] HBASE-25447 remoteProc is suspended due to OOM ERROR
 (#2824)

Some OOME can not cause the JVM to exit, like
"java.lang.OutOfMemoryError: Direct buffer memory",
"java.lang.OutOfMemoryError: unable to create new native thread",
as they don't call
vmError#next_OnError_command. So abort HMaster when uncaught exception occurs in TimeoutExecutor, the new active Hmaster will resume the suspended procedure. Signed-off-by: Duo Zhang Signed-off-by: stack Signed-off-by: Pankaj Kumar --- .../hbase/procedure2/RemoteProcedureDispatcher.java | 4 ++++ .../hbase/master/procedure/RSProcedureDispatcher.java | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 611fc86f9a3c..a060f14ccf9a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -106,6 +106,10 @@ public boolean start() { return true; } + protected void setTimeoutExecutorUncaughtExceptionHandler(UncaughtExceptionHandler eh) { + timeoutExecutor.setUncaughtExceptionHandler(eh); + } + public boolean stop() { if (!running.getAndSet(false)) { return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index 2f990cb0b064..d028bb40321b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -94,6 +94,7 @@ public boolean start() { if (!super.start()) { return false; } + setTimeoutExecutorUncaughtExceptionHandler(this::abort); if (master.isStopped()) { LOG.debug("Stopped"); return false; @@ -126,6 +127,13 @@ public boolean start() { return true; } + private void abort(Thread t, Throwable e) { + LOG.error("Caught error", e); + if (!master.isStopped() && !master.isStopping() && !master.isAborted()) { + master.abort("Aborting master", e); + } + } + @Override public boolean stop() { if (!super.stop()) { From 88316f017acf2abae8bb025523da7a1e9d6d15f1 Mon Sep 17 00:00:00 2001 From: Mate Szalay-Beko Date: Tue, 5 Jan 2021 09:24:24 +0100 Subject: [PATCH 619/769] HBASE-25318 Config option for IntegrationTestImportTsv where to generate HFiles to bulkload (#2777) IntegrationTestImportTsv is generating HFiles under the working directory of the current hdfs user executing the tool, before bulkloading it into HBase. Assuming you encrypt the HBase root directory within HDFS (using HDFS Transparent Encryption), you can bulkload HFiles only if they sit in the same encryption zone in HDFS as the HBase root directory itself. When IntegrationTestImportTsv is executed against a real distributed cluster and the working directory of the current user (e.g. /user/hbase) is not in the same encryption zone as the HBase root directory (e.g. /hbase/data) then you will get an exception: ``` ERROR org.apache.hadoop.hbase.regionserver.HRegion: There was a partial failure due to IO when attempting to load d : hdfs://mycluster/user/hbase/test-data/22d8460d-04cc-e032-88ca-2cc20a7dd01c/ IntegrationTestImportTsv/hfiles/d/74655e3f8da142cb94bc31b64f0475cc org.apache.hadoop.ipc.RemoteException(java.io.IOException): /user/hbase/test-data/22d8460d-04cc-e032-88ca-2cc20a7dd01c/ IntegrationTestImportTsv/hfiles/d/74655e3f8da142cb94bc31b64f0475cc can't be moved into an encryption zone. 
``` In this commit I make it configurable where the IntegrationTestImportTsv generates the HFiles. Co-authored-by: Mate Szalay-Beko Signed-off-by: Peter Somogyi --- .../mapreduce/IntegrationTestImportTsv.java | 36 +++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java index c80d61c4ea66..28b4ae467dda 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.HashMap; @@ -29,6 +30,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeSet; +import java.util.UUID; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -66,6 +68,8 @@ public class IntegrationTestImportTsv extends Configured implements Tool { private static final String NAME = IntegrationTestImportTsv.class.getSimpleName(); private static final Logger LOG = LoggerFactory.getLogger(IntegrationTestImportTsv.class); + private static final String GENERATED_HFILE_FOLDER_PARAM_KEY = + "IntegrationTestImportTsv.generatedHFileFolder"; protected static final String simple_tsv = "row1\t1\tc1\tc2\n" + @@ -190,8 +194,8 @@ public void testGenerateAndLoad() throws Exception { void generateAndLoad(final TableName table) throws Exception { LOG.info("Running test testGenerateAndLoad."); String cf = "d"; - Path hfiles = new Path( - util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); + Path hfiles = initGeneratedHFilePath(table); + LOG.info("The folder where the HFiles will be generated: {}", hfiles.toString()); Map args = new HashMap<>(); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); @@ -220,6 +224,12 @@ public int run(String[] args) throws Exception { System.err.println(format("%s [genericOptions]", NAME)); System.err.println(" Runs ImportTsv integration tests against a distributed cluster."); System.err.println(); + System.err.println(" Use '-D" + GENERATED_HFILE_FOLDER_PARAM_KEY + "=' to define a"); + System.err.println(" base folder for the generated HFiles. 
If HDFS Transparent Encryption");
+    System.err.println("  is configured, then make sure to set this parameter to a folder in");
+    System.err.println("  the same encryption zone in HDFS as the HBase root directory,");
+    System.err.println("  otherwise the bulkload will fail.");
+    System.err.println();
     ToolRunner.printGenericCommandUsage(System.err);
     return 1;
   }
@@ -237,6 +247,28 @@ public int run(String[] args) throws Exception {
     return 0;
   }

+  private Path initGeneratedHFilePath(final TableName table) throws IOException {
+    String folderParam = getConf().getTrimmed(GENERATED_HFILE_FOLDER_PARAM_KEY);
+    if (folderParam == null || folderParam.isEmpty()) {
+      // by default, fall back to the test data dir
+      return new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles");
+    }
+
+    Path hfiles = new Path(folderParam, UUID.randomUUID().toString());
+    FileSystem fs = util.getTestFileSystem();
+    String shouldPreserve = System.getProperty("hbase.testing.preserve.testdir", "false");
+    if (!Boolean.parseBoolean(shouldPreserve)) {
+      if (fs.getUri().getScheme().equals(FileSystem.getLocal(getConf()).getUri().getScheme())) {
+        File localFolder = new File(hfiles.toString());
+        localFolder.deleteOnExit();
+      } else {
+        fs.deleteOnExit(hfiles);
+      }
+    }
+    return hfiles;
+  }
+
   public static void main(String[] args) throws Exception {
     Configuration conf = HBaseConfiguration.create();
     IntegrationTestingUtility.setUseDistributedCluster(conf);

From b53d23728a4727394bd693e940de32ccf5212f43 Mon Sep 17 00:00:00 2001
From: Wellington Ramos Chevreuil
Date: Tue, 5 Jan 2021 10:21:26 +0000
Subject: [PATCH 620/769] =?UTF-8?q?HBASE-24813=20ReplicationSource=20shoul?=
 =?UTF-8?q?d=20clear=20buffer=20usage=20on=20Replicatio=E2=80=A6=20(#2546)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Ankit Singhal
---
 .../regionserver/ReplicationSource.java        |  5 ++
 .../ReplicationSourceShipper.java              | 54 +++++++++++++++++++
 .../ReplicationSourceWALReader.java            |  3 +-
 .../regionserver/TestReplicationSource.java    | 54 +++++++++++++++++--
 4 files changed, 112 insertions(+), 4 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 423ec0e0005e..317db6628f59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -686,6 +686,7 @@ public void terminate(String reason, Exception cause, boolean clearMetrics,
       Threads.shutdown(initThread, this.sleepForRetries);
     }
     Collection<ReplicationSourceShipper> workers = workerThreads.values();
+
     for (ReplicationSourceShipper worker : workers) {
       worker.stopWorker();
       if(worker.entryReader != null) {
@@ -696,6 +697,7 @@ public void terminate(String reason, Exception cause, boolean clearMetrics,
     if (this.replicationEndpoint != null) {
       this.replicationEndpoint.stop();
     }
+
     for (ReplicationSourceShipper worker : workers) {
       if (worker.isAlive() || worker.entryReader.isAlive()) {
         try {
@@ -714,6 +716,9 @@ public void terminate(String reason, Exception cause, boolean clearMetrics,
           worker.entryReader.interrupt();
         }
       }
+      // If worker is already stopped but there were still entries batched,
+      // we need to clear the buffer used for non-processed entries
+      worker.clearWALEntryBatch();
     }

     if (join) {
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
index d3af995d6d9c..78bf42fb9045 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
@@ -22,6 +22,8 @@
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.PriorityBlockingQueue;
+import java.util.concurrent.atomic.LongAccumulator;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -323,4 +325,56 @@ void stopWorker() {
   public boolean isFinished() {
     return state == WorkerState.FINISHED;
   }
+
+  /**
+   * Attempts to properly update ReplicationSourceManager.totalBufferUser,
+   * in case there were unprocessed entries batched by the reader to the shipper,
+   * but the shipper didn't manage to ship those because the replication source is being terminated.
+   * In that case, it iterates through the batched entries and decreases the pending
+   * entries size from ReplicationSourceManager.totalBufferUser
+   * <p/>
+   * NOTES
+   * 1) This method should only be called upon replication source termination.
+   * It blocks waiting for both shipper and reader threads to terminate,
+   * to make sure there are no race conditions
+   * when updating ReplicationSourceManager.totalBufferUser.
+   *
+   * 2) It does not attempt to terminate reader and shipper threads. Those must
+   * have had interruption/termination triggered prior to calling this method.
+   */
+  void clearWALEntryBatch() {
+    long timeout = System.currentTimeMillis() + this.shipEditsTimeout;
+    while(this.isAlive() || this.entryReader.isAlive()){
+      try {
+        if (System.currentTimeMillis() >= timeout) {
+          LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper "
+            + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}",
+            this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive());
+          return;
+        } else {
+          // Wait both shipper and reader threads to stop
+          Thread.sleep(this.sleepForRetries);
+        }
+      } catch (InterruptedException e) {
+        LOG.warn("{} Interrupted while waiting {} to stop on clearWALEntryBatch. "
+          + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e);
+        return;
+      }
+    }
+    LongAccumulator totalToDecrement = new LongAccumulator((a,b) -> a + b, 0);
+    entryReader.entryBatchQueue.forEach(w -> {
+      entryReader.entryBatchQueue.remove(w);
+      w.getWalEntries().forEach(e -> {
+        long entrySizeExcludeBulkLoad = ReplicationSourceWALReader.getEntrySizeExcludeBulkLoad(e);
+        totalToDecrement.accumulate(entrySizeExcludeBulkLoad);
+      });
+    });
+    if( LOG.isTraceEnabled()) {
+      LOG.trace("Decrementing totalBufferUsed by {}B while stopping Replication WAL Readers.",
+        totalToDecrement.longValue());
+    }
+    long newBufferUsed = source.getSourceManager().getTotalBufferUsed()
+      .addAndGet(-totalToDecrement.longValue());
+    source.getSourceManager().getGlobalMetrics().setWALReaderEditsBufferBytes(newBufferUsed);
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
index c71db1bf785b..a6d87870b495 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
@@ -60,7 +60,8 @@ class ReplicationSourceWALReader extends Thread {

   private final WALEntryFilter filter;
   private final ReplicationSource source;

-  private final BlockingQueue<WALEntryBatch> entryBatchQueue;
+  @InterfaceAudience.Private
+  final BlockingQueue<WALEntryBatch> entryBatchQueue;
   // max (heap) size of each batch - multiply by number of batches in queue to get total
   private final long replicationBatchSizeCapacity;
   // max count of each batch - multiply by number of batches in queue to get total
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
index 796c0e3b18c7..50537b5e1be2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
@@ -22,7 +22,10 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.OptionalLong;
 import java.util.UUID;
 import java.util.concurrent.ExecutorService;
@@ -128,6 +131,8 @@ public void testDefaultSkipsMetaWAL() throws IOException {
     Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
     ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
     Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
+    Mockito.when(manager.getGlobalMetrics()).
+      thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
     String queueId = "qid";
     RegionServerServices rss =
       TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
@@ -269,6 +274,47 @@ public void testTerminateTimeout() throws Exception {
     }
   }

+  @Test
+  public void testTerminateClearsBuffer() throws Exception {
+    ReplicationSource source = new ReplicationSource();
+    ReplicationSourceManager mockManager = mock(ReplicationSourceManager.class);
+    MetricsReplicationGlobalSourceSource mockMetrics =
+      mock(MetricsReplicationGlobalSourceSource.class);
+    AtomicLong buffer = new AtomicLong();
+    Mockito.when(mockManager.getTotalBufferUsed()).thenReturn(buffer);
+    Mockito.when(mockManager.getGlobalMetrics()).thenReturn(mockMetrics);
+    ReplicationPeer mockPeer = mock(ReplicationPeer.class);
+    Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
+    Configuration testConf = HBaseConfiguration.create();
+    source.init(testConf, null, mockManager, null, mockPeer, null,
+      "testPeer", null, p -> OptionalLong.empty(), mock(MetricsSource.class));
+    ReplicationSourceWALReader reader = new ReplicationSourceWALReader(null,
+      conf, null, 0, null, source);
+    ReplicationSourceShipper shipper =
+      new ReplicationSourceShipper(conf, null, null, source);
+    shipper.entryReader = reader;
+    source.workerThreads.put("testPeer", shipper);
+    WALEntryBatch batch = new WALEntryBatch(10, logDir);
+    WAL.Entry mockEntry = mock(WAL.Entry.class);
+    WALEdit mockEdit = mock(WALEdit.class);
+    WALKeyImpl mockKey = mock(WALKeyImpl.class);
+    when(mockEntry.getEdit()).thenReturn(mockEdit);
+    when(mockEdit.isEmpty()).thenReturn(false);
+    when(mockEntry.getKey()).thenReturn(mockKey);
+    when(mockKey.estimatedSerializedSizeOf()).thenReturn(1000L);
+    when(mockEdit.heapSize()).thenReturn(10000L);
+    when(mockEdit.size()).thenReturn(0);
+    ArrayList<Cell> cells = new ArrayList<>();
+    KeyValue kv = new KeyValue(Bytes.toBytes("0001"), Bytes.toBytes("f"),
+      Bytes.toBytes("1"), Bytes.toBytes("v1"));
+    cells.add(kv);
+    when(mockEdit.getCells()).thenReturn(cells);
+    reader.addEntryToBatch(batch, mockEntry);
+    reader.entryBatchQueue.put(batch);
+    source.terminate("test");
+    assertEquals(0, source.getSourceManager().getTotalBufferUsed().get());
+  }
+
   /**
    * Tests that recovered queues are preserved on a regionserver shutdown.
    * See HBASE-18192
@@ -438,12 +484,12 @@ public void testRecoveredReplicationSourceShipperGetPosition() throws Exception
     ServerName deadServer = ServerName.valueOf("www.deadServer.com", 12006, 1524679704419L);
     PriorityBlockingQueue<Path> queue = new PriorityBlockingQueue<>();
     queue.put(new Path("/www/html/test"));
-    RecoveredReplicationSource source = Mockito.mock(RecoveredReplicationSource.class);
-    Server server = Mockito.mock(Server.class);
+    RecoveredReplicationSource source = mock(RecoveredReplicationSource.class);
+    Server server = mock(Server.class);
     Mockito.when(server.getServerName()).thenReturn(serverName);
     Mockito.when(source.getServer()).thenReturn(server);
     Mockito.when(source.getServerWALsBelongTo()).thenReturn(deadServer);
-    ReplicationQueueStorage storage = Mockito.mock(ReplicationQueueStorage.class);
+    ReplicationQueueStorage storage = mock(ReplicationQueueStorage.class);
     Mockito.when(storage.getWALPosition(Mockito.eq(serverName), Mockito.any(), Mockito.any()))
       .thenReturn(1001L);
     Mockito.when(storage.getWALPosition(Mockito.eq(deadServer), Mockito.any(), Mockito.any()))
@@ -468,6 +514,8 @@ private RegionServerServices setupForAbortTests(ReplicationSource rs, Configurat
     Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
     ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
     Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
+    Mockito.when(manager.getGlobalMetrics()).
+      thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
     String queueId = "qid";
     RegionServerServices rss =
       TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));

From d977a7625b1d7811f9f11551085213dba129043e Mon Sep 17 00:00:00 2001
From: Gary Wang <34413055+whua3@users.noreply.github.com>
Date: Tue, 5 Jan 2021 22:39:19 +0800
Subject: [PATCH 621/769] HBASE-25463 fix comment error of append.rb (#2845)

Signed-off-by: Viraj Jasani
---
 hbase-shell/src/main/ruby/shell/commands/append.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hbase-shell/src/main/ruby/shell/commands/append.rb b/hbase-shell/src/main/ruby/shell/commands/append.rb
index a7273ca4edfe..b469f0bda638 100644
--- a/hbase-shell/src/main/ruby/shell/commands/append.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/append.rb
@@ -50,5 +50,5 @@ def append(table, row, column, value, args = {})
     end
   end
 end
-# add incr comamnd to Table
+# add append command to Table
 ::Hbase::Table.add_shell_command('append')

From 91c9193507d9bb7f76262520d47275ede7d2e064 Mon Sep 17 00:00:00 2001
From: stack
Date: Tue, 5 Jan 2021 19:39:39 -0800
Subject: [PATCH 622/769] =?UTF-8?q?HBASE-25438=20Update=20create-release?=
 =?UTF-8?q?=20mvn=20in=20Dockerfile;=20its=203.6.0;=20make=20=E2=80=A6=20(?=
 =?UTF-8?q?#2807)=20Addendum.=20Missing=20-L=20on=20added=20curl.?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 dev-support/create-release/hbase-rm/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-support/create-release/hbase-rm/Dockerfile b/dev-support/create-release/hbase-rm/Dockerfile
index ac443b64228d..c43976f61dd1 100644
--- a/dev-support/create-release/hbase-rm/Dockerfile
+++ b/dev-support/create-release/hbase-rm/Dockerfile
@@ -59,7 +59,7 @@ RUN mkdir -p /opt/maven \

 # Install Apache Yetus
 ENV YETUS_VERSION 0.12.0
-RUN curl "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \
+RUN curl -L
"https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/yetus/${YETUS_VERSION}/apache-yetus-${YETUS_VERSION}-bin.tar.gz" | \ tar xvz -C /opt ENV YETUS_HOME /opt/apache-yetus-${YETUS_VERSION} From 85f7205de6b0e79598b5655ba84c937461acd112 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 6 Jan 2021 15:13:10 +0800 Subject: [PATCH 623/769] HBASE-25458 HRegion methods cleanup (#2838) Signed-off-by: meiyi --- .../hadoop/hbase/regionserver/HRegion.java | 225 ++++++++---------- .../hbase/regionserver/RSRpcServices.java | 3 +- .../regionserver/TestCompactingMemStore.java | 6 +- .../TestCompactingToCellFlatMapMemStore.java | 3 +- .../regionserver/TestDefaultMemStore.java | 23 +- .../hbase/regionserver/TestHRegion.java | 45 ++-- 6 files changed, 126 insertions(+), 179 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 493b74b6b9ac..4ec61ac5c051 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -913,17 +913,19 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co } } - void setHTableSpecificConf() { - if (this.htableDescriptor == null) return; + private void setHTableSpecificConf() { + if (this.htableDescriptor == null) { + return; + } long flushSize = this.htableDescriptor.getMemStoreFlushSize(); if (flushSize <= 0) { flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, - TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); + TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); } this.memstoreFlushSize = flushSize; long mult = conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, - HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); + HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); this.blockingMemStoreSize = this.memstoreFlushSize * mult; } @@ -1336,7 +1338,7 @@ public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration * Increase the size of mem store in this region and the size of global mem * store */ - void incMemStoreSize(MemStoreSize mss) { + private void incMemStoreSize(MemStoreSize mss) { incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), mss.getCellsCount()); } @@ -1356,7 +1358,7 @@ void decrMemStoreSize(MemStoreSize mss) { mss.getCellsCount()); } - void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, + private void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, int cellsCountDelta) { if (this.rsAccounting != null) { rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta); @@ -1987,7 +1989,7 @@ public boolean waitForFlushes(long timeout) { } } - protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool( + private ThreadPoolExecutor getStoreOpenAndCloseThreadPool( final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount()); int maxThreads = Math.min(numStores, @@ -1996,7 +1998,7 @@ protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool( return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix); } - protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( + ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount()); int maxThreads = Math.max(1, @@ -2006,7 
+2008,7 @@ protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix); } - static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads, + private static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads, final String threadNamePrefix) { return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactory() { @@ -2475,11 +2477,11 @@ enum Result { boolean isCompactionNeeded(); } - public FlushResultImpl flushcache(boolean flushAllStores, boolean writeFlushRequestWalMarker, + FlushResultImpl flushcache(boolean flushAllStores, boolean writeFlushRequestWalMarker, FlushLifeCycleTracker tracker) throws IOException { - List families = null; + List families = null; if (flushAllStores) { - families = new ArrayList(); + families = new ArrayList<>(); families.addAll(this.getTableDescriptor().getColumnFamilyNames()); } return this.flushcache(families, writeFlushRequestWalMarker, tracker); @@ -2960,7 +2962,7 @@ private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarke @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", justification="Intentional; notify is about completed flush") - protected FlushResultImpl internalFlushCacheAndCommit(WAL wal, MonitoredTask status, + FlushResultImpl internalFlushCacheAndCommit(WAL wal, MonitoredTask status, PrepareFlushResult prepareResult, Collection storesToFlush) throws IOException { // prepare flush context is carried via PrepareFlushResult TreeMap storeFlushCtxs = prepareResult.storeFlushCtxs; @@ -3157,12 +3159,6 @@ private RegionScannerImpl getScanner(Scan scan, List additional } } - protected RegionScanner instantiateRegionScanner(Scan scan, - List additionalScanners) throws IOException { - return instantiateRegionScanner(scan, additionalScanners, HConstants.NO_NONCE, - HConstants.NO_NONCE); - } - protected RegionScannerImpl instantiateRegionScanner(Scan scan, List additionalScanners, long nonceGroup, long nonce) throws IOException { if (scan.isReversed()) { @@ -3177,9 +3173,8 @@ protected RegionScannerImpl instantiateRegionScanner(Scan scan, /** * Prepare a delete for a row mutation processor * @param delete The passed delete is modified by this method. WARNING! - * @throws IOException */ - public void prepareDelete(Delete delete) throws IOException { + private void prepareDelete(Delete delete) throws IOException { // Check to see if this is a deleteRow insert if(delete.getFamilyCellMap().isEmpty()){ for(byte [] family : this.htableDescriptor.getColumnFamilyNames()){ @@ -3203,38 +3198,18 @@ public void delete(Delete delete) throws IOException { startRegionOperation(Operation.DELETE); try { // All edits for the given row (across all column families) must happen atomically. - doBatchMutate(delete); + mutate(delete); } finally { closeRegionOperation(Operation.DELETE); } } - /** - * Row needed by below method. - */ - private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly"); - - /** - * This is used only by unit tests. Not required to be a public API. - * @param familyMap map of family to edits for the given family. - * @throws IOException - */ - void delete(NavigableMap> familyMap, - Durability durability) throws IOException { - Delete delete = new Delete(FOR_UNIT_TESTS_ONLY, HConstants.LATEST_TIMESTAMP, familyMap); - delete.setDurability(durability); - doBatchMutate(delete); - } - /** * Set up correct timestamps in the KVs in Delete object. - *
<p>
    Caller should have the row and region locks. - * @param mutation - * @param familyMap - * @param byteNow - * @throws IOException + *
<p>
    + * Caller should have the row and region locks. */ - public void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, + private void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, byte[] byteNow) throws IOException { for (Map.Entry> e : familyMap.entrySet()) { @@ -3278,7 +3253,7 @@ public void prepareDeleteTimestamps(Mutation mutation, Map> f } } - void updateDeleteLatestVersionTimestamp(Cell cell, Get get, int count, byte[] byteNow) + private void updateDeleteLatestVersionTimestamp(Cell cell, Get get, int count, byte[] byteNow) throws IOException { List result = get(get, false); @@ -3306,7 +3281,7 @@ public void put(Put put) throws IOException { startRegionOperation(Operation.PUT); try { // All edits for the given row (across all column families) must happen atomically. - doBatchMutate(put); + mutate(put); } finally { closeRegionOperation(Operation.PUT); } @@ -3353,7 +3328,7 @@ public BatchOperation(final HRegion region, T[] operations) { * Visitor interface for batch operations */ @FunctionalInterface - public interface Visitor { + interface Visitor { /** * @param index operation index * @return If true continue visiting remaining entries, break otherwise @@ -3759,14 +3734,17 @@ protected void applyFamilyMapToMemStore(Map> familyMap, /** - * Batch of mutation operations. Base class is shared with {@link ReplayBatchOperation} as most - * of the logic is same. + * Batch of mutation operations. Base class is shared with {@link ReplayBatchOperation} as most of + * the logic is same. */ - static class MutationBatchOperation extends BatchOperation { + private static class MutationBatchOperation extends BatchOperation { + private long nonceGroup; + private long nonce; + public MutationBatchOperation(final HRegion region, Mutation[] operations, boolean atomic, - long nonceGroup, long nonce) { + long nonceGroup, long nonce) { super(region, operations); this.atomic = atomic; this.nonceGroup = nonceGroup; @@ -4401,10 +4379,12 @@ private void mergeFamilyMaps(Map> familyMap, * Batch of mutations for replay. Base class is shared with {@link MutationBatchOperation} as most * of the logic is same. */ - static class ReplayBatchOperation extends BatchOperation { + private static final class ReplayBatchOperation extends BatchOperation { + private long origLogSeqNum = 0; + public ReplayBatchOperation(final HRegion region, MutationReplay[] operations, - long origLogSeqNum) { + long origLogSeqNum) { super(region, operations); this.origLogSeqNum = origLogSeqNum; } @@ -4512,12 +4492,12 @@ public void completeMiniBatchOperations( } } - public OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long nonceGroup, - long nonce) throws IOException { + private OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long nonceGroup, + long nonce) throws IOException { // As it stands, this is used for 3 things - // * batchMutate with single mutation - put/delete/increment/append, separate or from - // checkAndMutate. - // * coprocessor calls (see ex. BulkDeleteEndpoint). + // * batchMutate with single mutation - put/delete/increment/append, separate or from + // checkAndMutate. + // * coprocessor calls (see ex. BulkDeleteEndpoint). // So nonces are not really ever used by HBase. They could be by coprocs, and checkAnd... 
return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce)); } @@ -4525,8 +4505,12 @@ public OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long @Override public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException { // If the mutations has any Increment/Append operations, we need to do batchMutate atomically - boolean atomic = Arrays.stream(mutations) - .anyMatch(m -> m instanceof Increment || m instanceof Append); + boolean atomic = + Arrays.stream(mutations).anyMatch(m -> m instanceof Increment || m instanceof Append); + return batchMutate(mutations, atomic); + } + + OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic) throws IOException { return batchMutate(mutations, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); } @@ -4556,24 +4540,23 @@ public OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqI /** * Perform a batch of mutations. - * + *
<p>
    * Operations in a batch are stored with highest durability specified of for all operations in a * batch, except for {@link Durability#SKIP_WAL}. - * - *
<p>
    This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with + *
<p>
+   * This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with
    * {@link ReplayBatchOperation} instance and {@link #batchMutate(Mutation[])} with
-   * {@link MutationBatchOperation} instance as an argument. As the processing of replay batch
-   * and mutation batch is very similar, lot of code is shared by providing generic methods in
-   * base class {@link BatchOperation}. The logic for this method and
-   * {@link #doMiniBatchMutate(BatchOperation)} is implemented using methods in base class which
-   * are overridden by derived classes to implement special behavior.
-   *
+   * {@link MutationBatchOperation} instance as an argument. As the processing of replay batch and
+   * mutation batch is very similar, a lot of code is shared by providing generic methods in base
+   * class {@link BatchOperation}. The logic for this method and
+   * {@link #doMiniBatchMutate(BatchOperation)} is implemented using methods in base class which are
+   * overridden by derived classes to implement special behavior.
    * @param batchOp contains the list of mutations
-   * @return an array of OperationStatus which internally contains the
-   *         OperationStatusCode and the exceptionMessage if any.
+   * @return an array of OperationStatus which internally contains the OperationStatusCode and the
+   *         exceptionMessage if any.
    * @throws IOException if an IO problem is encountered
    */
-  OperationStatus[] batchMutate(BatchOperation<?> batchOp) throws IOException {
+  private OperationStatus[] batchMutate(BatchOperation<?> batchOp) throws IOException {
     boolean initialized = false;
     batchOp.startRegionOperation();
     try {
@@ -4727,7 +4710,7 @@ private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
    * Returns effective durability from the passed durability and
    * the table descriptor.
    */
-  protected Durability getEffectiveDurability(Durability d) {
+  private Durability getEffectiveDurability(Durability d) {
     return d == Durability.USE_DEFAULT ? this.regionDurability : d;
   }
@@ -4916,7 +4899,7 @@ public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws
       // All edits for the given row (across all column families) must happen atomically.
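      // (Editor's sketch, not part of the patch: the client-side request shape this path
      // serves; row/family/qualifier/value names here are hypothetical.)
      //   CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
      //     .ifEquals(family, qualifier, expectedValue)
      //     .build(new Put(row).addColumn(family, qualifier, newValue));
      //   table.checkAndMutate(checkAndMutate);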
Result r; if (mutation != null) { - r = doBatchMutate(mutation, true).getResult(); + r = mutate(mutation, true).getResult(); } else { r = mutateRow(rowMutations); } @@ -4976,27 +4959,26 @@ private boolean matches(final CompareOperator op, final int compareResult) { return matches; } - private OperationStatus doBatchMutate(Mutation mutation) throws IOException { - return doBatchMutate(mutation, false); + private OperationStatus mutate(Mutation mutation) throws IOException { + return mutate(mutation, false); } - private OperationStatus doBatchMutate(Mutation mutation, boolean atomic) throws IOException { - return doBatchMutate(mutation, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); + private OperationStatus mutate(Mutation mutation, boolean atomic) throws IOException { + return mutate(mutation, atomic, HConstants.NO_NONCE, HConstants.NO_NONCE); } - private OperationStatus doBatchMutate(Mutation mutation, boolean atomic, long nonceGroup, - long nonce) throws IOException { - OperationStatus[] batchMutate = this.batchMutate(new Mutation[]{mutation}, atomic, - nonceGroup, nonce); - if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { - throw new FailedSanityCheckException(batchMutate[0].getExceptionMsg()); - } else if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { - throw new NoSuchColumnFamilyException(batchMutate[0].getExceptionMsg()); - } else if (batchMutate[0].getOperationStatusCode().equals( - OperationStatusCode.STORE_TOO_BUSY)) { - throw new RegionTooBusyException(batchMutate[0].getExceptionMsg()); + private OperationStatus mutate(Mutation mutation, boolean atomic, long nonceGroup, long nonce) + throws IOException { + OperationStatus[] status = + this.batchMutate(new Mutation[] { mutation }, atomic, nonceGroup, nonce); + if (status[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { + throw new FailedSanityCheckException(status[0].getExceptionMsg()); + } else if (status[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { + throw new NoSuchColumnFamilyException(status[0].getExceptionMsg()); + } else if (status[0].getOperationStatusCode().equals(OperationStatusCode.STORE_TOO_BUSY)) { + throw new RegionTooBusyException(status[0].getExceptionMsg()); } - return batchMutate[0]; + return status[0]; } /** @@ -5055,7 +5037,7 @@ private static void updateCellTimestamps(final Iterable> cellItr, fin /** * Possibly rewrite incoming cell tags. */ - void rewriteCellTags(Map> familyMap, final Mutation m) { + private void rewriteCellTags(Map> familyMap, final Mutation m) { // Check if we have any work to do and early out otherwise // Update these checks as more logic is added here if (m.getTTL() == Long.MAX_VALUE) { @@ -5077,15 +5059,17 @@ void rewriteCellTags(Map> familyMap, final Mutation m) { } } - /* + /** * Check if resources to support an update. - * - * We throw RegionTooBusyException if above memstore limit - * and expect client to retry using some kind of backoff - */ - void checkResources() throws RegionTooBusyException { + *
<p>
    + * We throw RegionTooBusyException if above memstore limit and expect client to retry using some + * kind of backoff + */ + private void checkResources() throws RegionTooBusyException { // If catalog region, do not impose resource constraints or block updates. - if (this.getRegionInfo().isMetaRegion()) return; + if (this.getRegionInfo().isMetaRegion()) { + return; + } MemStoreSize mss = this.memStoreSizing.getMemStoreSize(); if (mss.getHeapSize() + mss.getOffHeapSize() > this.blockingMemStoreSize) { @@ -5110,13 +5094,13 @@ void checkResources() throws RegionTooBusyException { /** * @throws IOException Throws exception if region is in read-only mode. */ - protected void checkReadOnly() throws IOException { + private void checkReadOnly() throws IOException { if (isReadOnly()) { throw new DoNotRetryIOException("region is read only"); } } - protected void checkReadsEnabled() throws IOException { + private void checkReadsEnabled() throws IOException { if (!this.writestate.readsEnabled) { throw new IOException(getRegionInfo().getEncodedName() + ": The region's reads are disabled. Cannot serve the request"); @@ -5130,21 +5114,6 @@ public void setReadsEnabled(boolean readsEnabled) { this.writestate.setReadsEnabled(readsEnabled); } - /** - * Add updates first to the wal and then add values to memstore. - *
<p>
    - * Warning: Assumption is caller has lock on passed in row. - * @param edits Cell updates by column - */ - void put(final byte[] row, byte[] family, List edits) throws IOException { - NavigableMap> familyMap; - familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - - familyMap.put(family, edits); - Put p = new Put(row, HConstants.LATEST_TIMESTAMP, familyMap); - doBatchMutate(p); - } - /** * @param delta If we are doing delta changes -- e.g. increment/append -- then this flag will be * set; when set we will run operations that make sense in the increment/append scenario @@ -5194,7 +5163,7 @@ private void checkFamily(final byte[] family, Durability durability) } } - void checkFamily(final byte[] family) throws NoSuchColumnFamilyException { + private void checkFamily(final byte[] family) throws NoSuchColumnFamilyException { if (!this.htableDescriptor.hasColumnFamily(family)) { throw new NoSuchColumnFamilyException( "Column family " + Bytes.toString(family) + " does not exist in region " + this @@ -6055,7 +6024,7 @@ private long loadRecoveredHFilesIfAny(Collection stores) throws IOExcept * Currently, this method is used to drop memstore to prevent memory leak * when replaying recovered.edits while opening region. */ - public MemStoreSize dropMemStoreContents() throws IOException { + private MemStoreSize dropMemStoreContents() throws IOException { MemStoreSizing totalFreedSize = new NonThreadSafeMemStoreSizing(); this.updatesLock.writeLock().lock(); try { @@ -8106,11 +8075,11 @@ public static Region openHRegion(final Region other, final CancelableProgressabl /** * Open HRegion. + *
<p>
    * Calls initialize and sets sequenceId. * @return Returns this */ - protected HRegion openHRegion(final CancelableProgressable reporter) - throws IOException { + private HRegion openHRegion(final CancelableProgressable reporter) throws IOException { try { // Refuse to open the region if we are missing local compression support TableDescriptorChecker.checkCompression(htableDescriptor); @@ -8255,7 +8224,7 @@ public List get(Get get, boolean withCoprocessor) throws IOException { return get(get, withCoprocessor, HConstants.NO_NONCE, HConstants.NO_NONCE); } - public List get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) + private List get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) throws IOException { List results = new ArrayList<>(); long before = EnvironmentEdgeManager.currentTime(); @@ -8619,7 +8588,7 @@ public Result append(Append append, long nonceGroup, long nonce) throws IOExcept startRegionOperation(Operation.APPEND); try { // All edits for the given row (across all column families) must happen atomically. - return doBatchMutate(append, true, nonceGroup, nonce).getResult(); + return mutate(append, true, nonceGroup, nonce).getResult(); } finally { closeRegionOperation(Operation.APPEND); } @@ -8636,7 +8605,7 @@ public Result increment(Increment increment, long nonceGroup, long nonce) throws startRegionOperation(Operation.INCREMENT); try { // All edits for the given row (across all column families) must happen atomically. - return doBatchMutate(increment, true, nonceGroup, nonce).getResult(); + return mutate(increment, true, nonceGroup, nonce).getResult(); } finally { closeRegionOperation(Operation.INCREMENT); } @@ -9176,15 +9145,11 @@ public void incrementFlushesQueuedCount() { flushesQueued.increment(); } - public long getReadPoint() { - return getReadPoint(IsolationLevel.READ_COMMITTED); - } - /** * If a handler thread is eligible for interrupt, make it ineligible. Should be paired * with {{@link #enableInterrupts()}. */ - protected void disableInterrupts() { + void disableInterrupts() { regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> false); } @@ -9192,7 +9157,7 @@ protected void disableInterrupts() { * If a handler thread was made ineligible for interrupt via {{@link #disableInterrupts()}, * make it eligible again. No-op if interrupts are already enabled. 
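   * <p>
   * (Editor's sketch, not part of the patch: the intended disable/enable pairing, assuming a
   * caller in the same package:)
   * <pre>
   * region.disableInterrupts();
   * try {
   *   // work that must not observe an interrupt
   * } finally {
   *   region.enableInterrupts();
   * }
   * </pre>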
*/ - protected void enableInterrupts() { + void enableInterrupts() { regionLockHolders.computeIfPresent(Thread.currentThread(), (t,b) -> true); } @@ -9364,7 +9329,7 @@ public void requestFlush(FlushLifeCycleTracker tracker) throws IOException { * features * @param conf region configurations */ - static void decorateRegionConfiguration(Configuration conf) { + private static void decorateRegionConfiguration(Configuration conf) { if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) { String plugins = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,""); String replicationCoprocessorClass = ReplicationObserver.class.getCanonicalName(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 78926d6c39d5..f8323c6a1164 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1003,8 +1003,7 @@ private void doBatchOp(final RegionActionResult.Builder builder, final HRegion r Arrays.sort(mArray, (v1, v2) -> Row.COMPARATOR.compare(v1, v2)); } - OperationStatus[] codes = region.batchMutate(mArray, atomic, HConstants.NO_NONCE, - HConstants.NO_NONCE); + OperationStatus[] codes = region.batchMutate(mArray, atomic); // When atomic is true, it indicates that the mutateRow API or the batch API with // RowMutations is called. In this case, we need to merge the results of the diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 9b336c21fc67..673369091d22 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -710,8 +710,7 @@ public void testCompaction2Buckets() throws IOException { mss = memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot // simulate flusher - region.decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), - mss.getCellsCount()); + region.decrMemStoreSize(mss); ImmutableSegment s = memstore.getSnapshot(); assertEquals(7, s.getCellsCount()); assertEquals(0, regionServicesForStores.getMemStoreSize()); @@ -788,8 +787,7 @@ public void testCompaction3Buckets() throws IOException { mss = memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot // simulate flusher - region.decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), - mss.getCellsCount()); + region.decrMemStoreSize(mss); ImmutableSegment s = memstore.getSnapshot(); assertEquals(4, s.getCellsCount()); assertEquals(0, regionServicesForStores.getMemStoreSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java index 617caeccd81e..072daa80210a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java @@ -282,8 +282,7 @@ public void testCompaction3Buckets() throws IOException { mss = 
memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot // simulate flusher - region.decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), - mss.getCellsCount()); + region.decrMemStoreSize(mss); ImmutableSegment s = memstore.getSnapshot(); assertEquals(4, s.getCellsCount()); assertEquals(0, regionServicesForStores.getMemStoreSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 12bfc667c2d7..986ffd0b4c54 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -26,7 +26,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.NavigableMap; import java.util.Objects; +import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; @@ -971,28 +973,23 @@ public void testShouldFlushMeta() throws Exception { } /** - * Inserts a new region's meta information into the passed - * meta region. Used by the HMaster bootstrap code adding - * new table to hbase:meta table. - * + * Inserts a new region's meta information into the passed meta region. * @param meta hbase:meta HRegion to be updated * @param r HRegion to add to meta - * - * @throws IOException */ - public static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException { - meta.checkResources(); + private static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException { // The row key is the region name byte[] row = r.getRegionInfo().getRegionName(); final long now = EnvironmentEdgeManager.currentTime(); final List cells = new ArrayList<>(2); - cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER, now, RegionInfo.toByteArray(r.getRegionInfo()))); + cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, now, + RegionInfo.toByteArray(r.getRegionInfo()))); // Set into the root table the version of the meta table. 
- cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, - HConstants.META_VERSION_QUALIFIER, now, + cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER, now, Bytes.toBytes(HConstants.META_VERSION))); - meta.put(row, HConstants.CATALOG_FAMILY, cells); + NavigableMap> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + familyMap.put(HConstants.CATALOG_FAMILY, cells); + meta.put(new Put(row, HConstants.LATEST_TIMESTAMP, familyMap)); } private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index fcbc718296ae..58668933c61f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -26,6 +26,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; @@ -43,7 +44,6 @@ import java.io.IOException; import java.io.InterruptedIOException; import java.math.BigDecimal; -import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; @@ -137,7 +137,6 @@ import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; -import org.apache.hadoop.hbase.regionserver.HRegion.MutationBatchOperation; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.Region.RowLock; @@ -1679,9 +1678,7 @@ public void testAtomicBatchPut() throws IOException { long syncs = prepareRegionForBachPut(puts, source, false); // 1. Straight forward case, should succeed - MutationBatchOperation batchOp = new MutationBatchOperation(region, puts, true, - HConstants.NO_NONCE, HConstants.NO_NONCE); - OperationStatus[] codes = this.region.batchMutate(batchOp); + OperationStatus[] codes = this.region.batchMutate(puts, true); assertEquals(10, codes.length); for (int i = 0; i < 10; i++) { assertEquals(OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode()); @@ -1695,15 +1692,11 @@ public void testAtomicBatchPut() throws IOException { MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(CONF); final AtomicReference retFromThread = new AtomicReference<>(); final CountDownLatch finishedPuts = new CountDownLatch(1); - final MutationBatchOperation finalBatchOp = new MutationBatchOperation(region, puts, true, - HConstants - .NO_NONCE, - HConstants.NO_NONCE); TestThread putter = new TestThread(ctx) { @Override public void doWork() throws IOException { try { - region.batchMutate(finalBatchOp); + region.batchMutate(puts, true); } catch (IOException ioe) { LOG.error("test failed!", ioe); retFromThread.set(ioe); @@ -1730,10 +1723,8 @@ public void doWork() throws IOException { // 3. 
Exception thrown in validation LOG.info("Next a batch put with one invalid family"); puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, value); - batchOp = new MutationBatchOperation(region, puts, true, HConstants.NO_NONCE, - HConstants.NO_NONCE); thrown.expect(NoSuchColumnFamilyException.class); - this.region.batchMutate(batchOp); + this.region.batchMutate(puts, true); } @Test @@ -3172,23 +3163,19 @@ public void testDelete_CheckFamily() throws IOException { List kvs = new ArrayList<>(); kvs.add(new KeyValue(row1, fam4, null, null)); + byte[] forUnitTestsOnly = Bytes.toBytes("ForUnitTestsOnly"); + // testing existing family - byte[] family = fam2; NavigableMap> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - deleteMap.put(family, kvs); - region.delete(deleteMap, Durability.SYNC_WAL); + deleteMap.put(fam2, kvs); + region.delete(new Delete(forUnitTestsOnly, HConstants.LATEST_TIMESTAMP, deleteMap)); // testing non existing family - boolean ok = false; - family = fam4; - try { - deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - deleteMap.put(family, kvs); - region.delete(deleteMap, Durability.SYNC_WAL); - } catch (Exception e) { - ok = true; - } - assertTrue("Family " + new String(family, StandardCharsets.UTF_8) + " does exist", ok); + NavigableMap> deleteMap2 = new TreeMap<>(Bytes.BYTES_COMPARATOR); + deleteMap2.put(fam4, kvs); + assertThrows("Family " + Bytes.toString(fam4) + " does exist", + NoSuchColumnFamilyException.class, + () -> region.delete(new Delete(forUnitTestsOnly, HConstants.LATEST_TIMESTAMP, deleteMap2))); } @Test @@ -3549,6 +3536,8 @@ public void testDelete_CheckTimestampUpdated() throws IOException { byte[] col2 = Bytes.toBytes("col2"); byte[] col3 = Bytes.toBytes("col3"); + byte[] forUnitTestsOnly = Bytes.toBytes("ForUnitTestsOnly"); + // Setting up region this.region = initHRegion(tableName, method, CONF, fam1); // Building checkerList @@ -3559,12 +3548,12 @@ public void testDelete_CheckTimestampUpdated() throws IOException { NavigableMap> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); deleteMap.put(fam1, kvs); - region.delete(deleteMap, Durability.SYNC_WAL); + region.delete(new Delete(forUnitTestsOnly, HConstants.LATEST_TIMESTAMP, deleteMap)); // extract the key values out the memstore: // This is kinda hacky, but better than nothing... 
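    // (Editor's note, not part of the patch: a sketch of the construction pattern these tests
    // now use -- building a Delete directly from a family map; fam1, kvs and forUnitTestsOnly
    // are names taken from this test:)
    //   NavigableMap<byte[], List<Cell>> familyCells = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    //   familyCells.put(fam1, kvs);
    //   region.delete(new Delete(forUnitTestsOnly, HConstants.LATEST_TIMESTAMP, familyCells));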
long now = System.currentTimeMillis(); - AbstractMemStore memstore = (AbstractMemStore)region.getStore(fam1).memstore; + AbstractMemStore memstore = (AbstractMemStore) region.getStore(fam1).memstore; Cell firstCell = memstore.getActive().first(); assertTrue(firstCell.getTimestamp() <= now); now = firstCell.getTimestamp(); From adf9acf96a10d437ccf1635e34b0f89a8966e61a Mon Sep 17 00:00:00 2001 From: mokai Date: Wed, 6 Jan 2021 22:27:58 +0800 Subject: [PATCH 624/769] HBASE-24755 [LOG][RSGroup]Error message is confusing while adding a offline RS to rsgroup (#2846) Signed-off-by: Viraj Jasani Signed-off-by: Wellington Chevreuil --- .../apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java | 6 +++--- .../org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 9850917e795d..3ef9365456fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -1233,9 +1233,9 @@ public void moveServers(Set
<Address>
servers, String targetGroupName) throws IOE
     Address firstServer = servers.iterator().next();
     RSGroupInfo srcGrp = getRSGroupOfServer(firstServer);
     if (srcGrp == null) {
-      // Be careful. This exception message is tested for in TestRSGroupsBase...
-      throw new ConstraintException("Source RSGroup for server " + firstServer
-          + " does not exist.");
+      // Be careful. This exception message is tested for in TestRSGroupsAdmin2...
+      throw new ConstraintException("Server " + firstServer
+          + " is either offline or it does not exist.");
     }

     // Only move online servers (when moving from 'default') or servers from other
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
index 983414236c3b..f31e80fa7ee6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
@@ -189,7 +189,7 @@ public void testMoveServers() throws Exception {
       ADMIN.moveServersToRSGroup(Sets.newHashSet(Address.fromString("foo:9999")), "foo");
       fail("Bogus servers shouldn't have been successfully moved.");
     } catch (IOException ex) {
-      String exp = "Source RSGroup for server foo:9999 does not exist.";
+      String exp = "Server foo:9999 is either offline or it does not exist.";
       String msg = "Expected '" + exp + "' in exception message: ";
       assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp));
     }
@@ -337,7 +337,7 @@ public boolean evaluate() throws Exception {
       ADMIN.setRSGroup(Sets.newHashSet(tableName), newGroup.getName());
       fail("Bogus servers shouldn't have been successfully moved.");
     } catch (IOException ex) {
-      String exp = "Source RSGroup for server foo:9999 does not exist.";
+      String exp = "Server foo:9999 is either offline or it does not exist.";
       String msg = "Expected '" + exp + "' in exception message: ";
       assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp));
     }

From 1410c76c46140c7d6105caed3440c1d25fad9640 Mon Sep 17 00:00:00 2001
From: Anjan Das
Date: Thu, 7 Jan 2021 15:31:50 +0530
Subject: [PATCH 625/769] HBASE-25445: Use WAL FS instead of master FS in
 SplitWALManager (#2844)

Signed-off-by: Pankaj
Signed-off-by: ramkrish86
Signed-off-by: Viraj Jasani
---
 .../hadoop/hbase/master/SplitWALManager.java |  3 +-
 .../hbase/master/TestSplitWALManager.java    | 64 +++++++++++++++++++
 2 files changed, 65 insertions(+), 2 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
index aa91c84cb672..6db094c4e6df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
@@ -85,8 +85,7 @@ public SplitWALManager(MasterServices master) throws IOException {
     this.splitWorkerAssigner = new SplitWorkerAssigner(this.master,
       conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER));
     this.rootDir = master.getMasterFileSystem().getWALRootDir();
-    // TODO: This should be the WAL FS, not the Master FS?
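     // (Editor's note, not part of the patch: the TODO above is what HBASE-25445 resolves.
     // The WAL root may live on a filesystem different from the HBase root dir, e.g. as the
     // new test below configures it, where walDir stands for the test's WAL directory:
     //   conf.set(CommonFSUtils.HBASE_WAL_DIR, walDir.toString());
     //   CommonFSUtils.setWALRootDir(conf, walDir);
     // so split-WAL archiving must resolve paths against getWALFileSystem(), not the
     // master/root filesystem.)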
- this.fs = master.getMasterFileSystem().getFileSystem(); + this.fs = master.getMasterFileSystem().getWALFileSystem(); this.walArchiveDir = new Path(this.rootDir, HConstants.HREGION_OLDLOGDIR_NAME); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index 7edb011f97f4..10eda749891d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -31,6 +31,14 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -43,6 +51,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.junit.After; @@ -54,6 +63,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Category({ MasterTests.class, LargeTests.class }) @@ -63,6 +74,7 @@ public class TestSplitWALManager { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSplitWALManager.class); + private static final Logger LOG = LoggerFactory.getLogger(TestSplitWALManager.class); private static HBaseTestingUtility TEST_UTIL; private HMaster master; private SplitWALManager splitWALManager; @@ -86,6 +98,58 @@ public void teardown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + @Test + public void testWALArchiveWithDifferentWalAndRootFS() throws Exception{ + HBaseTestingUtility test_util_2 = new HBaseTestingUtility(); + Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); + test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); + CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir); + test_util_2.startMiniCluster(3); + HMaster master2 = test_util_2.getHBaseCluster().getMaster(); + LOG.info("The Master FS is pointing to: " + master2.getMasterFileSystem() + .getFileSystem().getUri()); + LOG.info("The WAL FS is pointing to: " + master2.getMasterFileSystem() + .getWALFileSystem().getUri()); + Table table = test_util_2.createTable(TABLE_NAME, FAMILY); + test_util_2.waitTableAvailable(TABLE_NAME); + Admin admin = test_util_2.getAdmin(); + MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster() + .getMasterProcedureExecutor().getEnvironment(); + final ProcedureExecutor executor = 
test_util_2.getMiniHBaseCluster() + .getMaster().getMasterProcedureExecutor(); + List regionInfos = admin.getRegions(TABLE_NAME); + SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure( + env, regionInfos.get(0), Bytes.toBytes("row5")); + // Populate some rows in the table + LOG.info("Beginning put data to the table: " + TABLE_NAME.toString()); + int rowCount = 5; + for (int i = 0; i < rowCount; i++) { + byte[] row = Bytes.toBytes("row" + i); + Put put = new Put(row); + put.addColumn(FAMILY, FAMILY, FAMILY); + table.put(put); + } + executor.submitProcedure(splitProcedure); + LOG.info("Submitted SplitProcedure."); + test_util_2.waitFor(30000, () -> executor.getProcedures().stream() + .filter(p -> p instanceof TransitRegionStateProcedure) + .map(p -> (TransitRegionStateProcedure) p) + .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); + test_util_2.getMiniHBaseCluster().killRegionServer( + test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName()); + test_util_2.getMiniHBaseCluster().startRegionServer(); + test_util_2.waitUntilNoRegionsInTransition(); + Scan scan = new Scan(); + ResultScanner results = table.getScanner(scan); + int scanRowCount = 0; + while (results.next() != null) { + scanRowCount++; + } + Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount + + " were expected.", rowCount, scanRowCount); + test_util_2.shutdownMiniCluster(); + } + @Test public void testAcquireAndRelease() throws Exception { List testProcedures = new ArrayList<>(); From 2eeaa2af63852782d84c50fbf11ce133b67b6e63 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 7 Jan 2021 09:44:33 -0800 Subject: [PATCH 626/769] HBASE-25468 Update git-jira-audit fallback_actions file with recent exceptions (#2852) Signed-off-by: huaxiangsun --- .../fallback_actions.csv | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/dev-support/git-jira-release-audit/fallback_actions.csv b/dev-support/git-jira-release-audit/fallback_actions.csv index eb6c97c1c567..72bdf8f419b2 100644 --- a/dev-support/git-jira-release-audit/fallback_actions.csv +++ b/dev-support/git-jira-release-audit/fallback_actions.csv @@ -22,12 +22,16 @@ hexsha,action,jira_id 0057cd8ca7ff09ed6b794af71df301c5c47487f4,SKIP, 022f30ce0dd3dd931f6045c6778e194ef5c41f7a,SKIP, +048cee6e47022194a1c2bf84cdb9e2873c7f74dd,SKIP, 0505072c5182841ad1a28d798527c69bcc3348f0,SKIP, +057d83cfafd8d659576869f1e71e3e75029fbad3,SKIP, 05cb051423953b913156e4950b67f3d9b28ada5f,REVERT,HBASE-14391 05f8e94191ef6a63baadf56d6114d7d0317796f2,SKIP, 0791b878422eadf00b55076338f09bf059f39f0c,SKIP, 07f9f3d38cf4d0d01044ab28d90a50a1a009f6b8,SKIP, +081d65de93587f77c22675497c5f3314bf21ded3,SKIP, 0bff1305134b9c3a0bcad21900f5af68a8aedb4a,SKIP, +0ebc96e0491dde1aed25f7a5f7ba1df5ed2042c5,SKIP, 10f00547627076d79d77cf58dd2deaece2287084,ADD,HBASE-22330 10f3b77748a02a2c11635c33964929c0474e890d,SKIP, 1196e42362312080d3c523c107b5e8fefef9e57e,SKIP, @@ -63,19 +67,23 @@ hexsha,action,jira_id 2e4544a8b00766248c998850f8907511b8bae240,SKIP, 2e63f882c85fb5804aafff5d92503eca60c0820d,SKIP, 2ebd80499473bbac3eac083806211ec03e084db7,SKIP, +30ab9665068ba85ddfabf0d4e21f4da28d24404e,SKIP, 31b9096034e19171989fd5b76313e7e0f1a9a12a,SKIP, 31d37fb904c4fcd77e79f9df8db155c5a3d1d8ed,SKIP, 31fe5dbf6b3a261f2c902d0fd6b82bf6c7ecf954,REVERT,HBASE-19685 31fe5dbf6b3a261f2c902d0fd6b82bf6c7ecf954,SKIP, +34b2b48a6f6c5546f98a6716cfc6f5f001ed2f1d,SKIP, 34e97f9c08d97b38be9a8f7dda6214d7ae9c6ea8,SKIP, 34ecc75d1669e330c78c3e9b832eca0abf57902d,SKIP, 
34fe1f5fd762e4ead3b0e2e820c360796939b315,SKIP, +361e81e1f893ae1bc923ef49d38b1832dbc6a253,SKIP, 37d46fcf85da772a06da29d9add8a0652330f6c5,SKIP, 38e2dbc503a7f9ef929ff11b615157f0ee79916c,SKIP, 3966d0fee6c9803cf567ef76d91855a1eaad621d,SKIP, 399b3e9d1bc68c2709565f0a1a719a9a66999564,SKIP, 39a4c56690eeeb2bb5ffaa0f3c8f6759b4fb3fb2,SKIP, 3a11028cdfc6e44576069bed452a0ed10c860db1,SKIP, +3a8b4d67386967b50a42941814801a2874d994eb,SKIP, 3b73ebb7b8975e18c67c24c258fbc061614bb7f2,SKIP, 3c7a349c2eab74a76c06b66df2e2d14ea7681f95,SKIP, 3dcb03947ce9cb1825167784992e689a23847351,ADD,HBASE-18290 @@ -118,6 +126,7 @@ hexsha,action,jira_id 6b54917d520d32d00f5b4e9420e0d4894aaa34e8,SKIP, 6cf647a0dfd696580c1d841e245d563beca451dd,SKIP, 6e376b900e125f71a71fd2a25c3ad08057b97f73,SKIP, +6f36c79c2fd0aadb204aed5a8f2459edfe153907,SKIP, 719993e0fe2b132b75a3689267ae4adff364b6aa,SKIP, 71ed7033675149956de855b6782e1e22fc908dc8,SKIP, 7242650afd466df511ba2d4cfa34f6d082cb1004,SKIP, @@ -137,10 +146,12 @@ hexsha,action,jira_id 7ea18e20680e86c200cbebc885ff91cfc1f72fac,SKIP, 80971f83d307ab661d830f1a2196729411873906,SKIP, 80d1f62cf7eaaeea569fe5a2e4a91fc270e7bc1f,SKIP, +825bdfb30413f205306debc14b120f1d33b52cc1,REVERT,HBASE-24713 829e6383d52e7a98947a4b2bdaa0b7e756bc6bfc,SKIP, 834488d435fb59d5cb2b0ed7f09b8b1e70d7e327,SKIP, 86242e1f55da7df6a2119389897d11356e6bbc2a,SKIP, 8670fb3339accf149d098552f523e9c14b90c941,SKIP, +87ce2cff979df88eed3ac2e530068fe2506a6fb6,SKIP, 880c7c35fc50f28ec3e072a4c62a348fc964e9e0,SKIP, 88ff206c57fac513b3c5442fd4369ced416279da,SKIP, 8aa1214a1722ba491d52cbbfab1b39cbd0eddeea,SKIP, @@ -149,6 +160,7 @@ hexsha,action,jira_id 8ef87ce4343e80321fcfd99594372759557c90f2,SKIP, 9213d7194ede5b723bc817a9bb634679ee3ce5c1,SKIP, 930f68c0b976a600066b838283a0f3dce050256f,SKIP, +94a03d7ae2ba2986fd359720704b88808d50f623,ADD,HBASE-24713 962d7e9bf06f4e2e569ba34acae6203b4deef778,ADD,HBASE-19074 97d7b3572cc661a8d31f82b9c567d7a75b9eef95,SKIP, 99e18fed23a2a476514fa4bd500b07a8d913e330,SKIP, @@ -163,7 +175,9 @@ a05cef75c4b33171ab29d89d0fbb0fbbc11d6d39,SKIP, a312705dbc8e6d604adcc874526294c72b8ff580,SKIP, a67481209f5d315f06e3a6910fa44493e398210f,REVERT,HBASE-16840 a72d40694116d84454f480c961c1cc1f5d7e1deb,SKIP, +a77829d5b7d627e904d13b9ffce41044b56d0feb,SKIP, a80799a3bc73513393f764df330704ad688140e8,SKIP, +aa5b28a7797564e021dd57626bebe911ad5da727,SKIP, aa8a9997792b686a606e8ada2cd34fb9ad895bc0,SKIP, aaeb488f43a9e79655275ddb481ba970b49d1173,SKIP, ac9035db199902533c07d80f384ae29c115d3ad5,SKIP, @@ -171,11 +185,17 @@ ad2064d8a5ff57d021852c3210a30c5f58eaa43c,SKIP, ad885a0baae21b943ffebef168c65650f8317023,SKIP, adec117e47a2ca503458954d6877667d877890fd,SKIP, ae95b1f215a120890de5454739651911749057ca,SKIP, +af1fa22e4dc824f8cb73ed682ee7c94fbae7a1c8,SKIP, +b0863c5832024033bc13efa3edb7c57b3b753996,SKIP, +b0863c5832024033bc13efa3edb7c57b3b753996,SKIP, b182030d48dcc89d8c26b98f2a58d7909957ea49,SKIP, +b33c200a28d6f26e68e3e2e651b7da463f030dc2,SKIP, b3d55441b8174c704ada4585603f6bcfca298843,SKIP, +b44cf90220ad58ab21852e451e505d4342ca022d,SKIP, b65231d04dbc565a578ce928e809aa51f5439857,SKIP, b6549007b313e8f3aa993d5c1ebd29c84ccb7b7b,SKIP, b6d4fc955fe0fc41f5225f1cc2e3e4b92029251c,SKIP, +b78f4367f710a8cb2b3df37ba158604e530301dc,SKIP, b9c676cdc048c52f927cfa906fd18ff412e4ca20,SKIP, b9f5c6b065ebd572193c1fdc9d38557320b42fe6,SKIP, bcadcef21048e4764f7ae8dec3ce52884f20c02c,SKIP, @@ -184,7 +204,9 @@ bd2c03dc7df600fe481ba7f2fed958deb18f5291,SKIP, bd4e14db07ea32a45c3ef734e06d195a405da67c,SKIP, bd4eba2b53b7af738fd9584511d737c4393d0855,SKIP, bef0616ef33306afca3060b96c2cba5f9762035d,SKIP, 
+c03ec837e70ebf014aabd8610d5fe4d53b239efa,SKIP, c100fb835a54be6002fe9704349e726f27b15b7a,SKIP, +c40b4781e4ae49308d5ac037364772de75f4f4e2,SKIP, c5e0a1397b3c6a14612e4c5b66f995c02de4310b,SKIP, c71da858ada94e1b93065f0b7caf3558942bc4da,SKIP, c89cfd3406823cf05fa83464c5ddee16bf0d473f,ADD,HBASE-17248 @@ -201,6 +223,7 @@ ce6a6014daded424d9460f7de4eadae169f52683,SKIP, cf1ccc30909bfb04326415e5a648605759d57360,SKIP, cf45c8d30a4d9810cd676b2a1a348141c4e27eeb,SKIP, d14e335edc9c22c30827bc75e73b5303ca64ee0d,SKIP, +d2c1886bf4df5746c05af7bc9b82715ead0b9d8e,ADD,HBASE-25450 d32230d0b5a4706b625cc7ac7ee7d28f44bd7b85,SKIP, d524768528cd15151ba1ebb82e32609da5308128,SKIP, d5a1b276270a1d41f21badd5b85d9502f8f9f415,SKIP, @@ -208,6 +231,7 @@ d6e85b0511396b3221cc7f495eaee5bbacc42afd,SKIP, d91908b0d46156fa364ba11d476b9cdbc01d0411,SKIP, da619282469c65dcf6bee06783c4246a24a1517c,SKIP, da8bcabb99ee5a9a35efd114aa45292616ca3c70,SKIP, +db7ad07081343df040b7d41b8881155257a02db5,SKIP, dfb1af48927a66aa5baa5b182e84327770b3c6c9,SKIP, e075492b4dac5c347b7f6b2e5318e2967b95b18b,SKIP, e08277ac8fe466bf63f6fc342256ab7b8d41243a,SKIP, @@ -218,11 +242,14 @@ e40fcee6b54712b76d702af6937c3320c60df2b9,SKIP, e501fe1a296be8fec0890e7e15414683aa3d933b,SKIP, e5349d589c000e395e12340e003aa9e2153afea6,SKIP, e5fb8214b2bfd6396539a4e8b6cf5f3cc5e9c06f,REVERT,HBASE-21874 +e67d7516ec4b4be0f0d9258af9f8c714b0bb7c58,SKIP, e869a20123afe326e198d35d110f5c0360ea244f,SKIP, e8e45ef8f2fb91a870399636b492d5cee58a4c39,SKIP, e92a147e1961366e36a39577816994566e1e21c5,SKIP, eacf3cb29641af1a68978d9bd7654f643a3aa3a1,SKIP, ec251bdd3649de7f30ece914c7930498e642527e,SKIP, +ec39d59161790d70e0b850b90dbd4101c5b6f895,SKIP, +ec39d59161790d70e0b850b90dbd4101c5b6f895,SKIP, ec39dc8c149b9f89a91596d57d27de812973f0a9,SKIP, ed520133d6dbb47a40f1883a56460582732f863a,SKIP, ed62e08786273587378b86278fae452dfc817dfb,SKIP, @@ -232,11 +259,15 @@ f0b1c4279eaf09d255336d1de9c2bc2b5d726e70,SKIP, f4acc47e2debb3d3d87c05436d940ef2fdfe0be3,SKIP, f6095adea64912deaebfaf2a6a5881b820d315b2,SKIP, f61f02b2b24af39545cc2754cfbc25122da60651,SKIP, +f66c80b6a655a6a39cdaba1af50918abcefff303,SKIP, f6d6bf59faa2a4a0767480af7658e4a844fd186f,SKIP, +f7bc7be1eb1ae7cd8ab09754845480e32a509384,SKIP, fab0b2e60385fca20021f74335a9c3d36368f621,SKIP, fb9be046aefb2e0b6e832dd00bc44a38ee62ab1f,SKIP, fc2ef413fab50d4375318fbd667051fd02f085f2,SKIP, fd5c5fb3887914183a1510f5972e50d9365e02f5,SKIP, +fd7beffcf92e1f435f4fd4aafb98057f067e9de4,SKIP, fe84833ea22c30b68022203132706ebb1e526852,SKIP, fe9e7483a316df9f5a62e9c215bcedcfd65c5f12,SKIP, ffcd4d424f69b4ecac1bd9f5980c14bb4b61a3fa,ADD,HBASE-13796 +ffeed7c6648391f02fd97d1da1fe4d210398437e,SKIP, From 154a669b6f06993a7ad8091d225abf084c1ce7e2 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 8 Jan 2021 17:50:08 +0800 Subject: [PATCH 627/769] HBASE-25471 Move RegionScannerImpl out of HRegion (#2853) Signed-off-by: Guanghao Zhang --- .../hadoop/hbase/regionserver/HRegion.java | 754 +---------------- .../hbase/regionserver/RSRpcServices.java | 1 - .../hbase/regionserver/RegionScannerImpl.java | 782 ++++++++++++++++++ .../ReversedRegionScannerImpl.java | 13 +- .../TestTransitRegionStateProcedure.java | 4 +- .../hbase/regionserver/TestHRegion.java | 3 +- .../regionserver/TestScanWithBloomError.java | 3 +- .../TestScannerHeartbeatMessages.java | 5 +- .../regionserver/TestSwitchToStreamRead.java | 6 +- .../hbase/regionserver/TestWideScanner.java | 2 +- 10 files changed, 802 insertions(+), 771 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 4ec61ac5c051..3b32f46ed044 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -30,7 +30,6 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.text.ParseException; -import java.util.AbstractList; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -100,7 +99,6 @@ import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagUtil; -import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.CheckAndMutateResult; @@ -112,7 +110,6 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -132,14 +129,11 @@ import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.FilterWrapper; -import org.apache.hadoop.hbase.filter.IncompatibleFilterException; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -148,8 +142,6 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry; -import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker; @@ -395,7 +387,7 @@ public void setRestoredRegion(boolean restoredRegion) { static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L; final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool(); - private final ConcurrentHashMap scannerReadPoints; + final ConcurrentHashMap scannerReadPoints; /** * The sequence ID that was enLongAddered when this region was opened. 
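The hunk above relaxes scannerReadPoints from private to package-private, which is the enabling change for the rest of the patch: the scanner implementation is about to move to its own file but must keep registering the MVCC read point it scans at, since the region uses the smallest registered point to decide which old cell versions are safe to discard. A simplified model of that bookkeeping, with types reduced to Object and long (the real map is keyed by the scanner instance):

    import java.util.concurrent.ConcurrentHashMap;

    final class ReadPointRegistry {
      private final ConcurrentHashMap<Object, Long> scannerReadPoints = new ConcurrentHashMap<>();

      // Publish under the map's monitor so a concurrent smallest-read-point
      // computation cannot run between choosing the point and registering it.
      long register(Object scanner, long readPoint) {
        synchronized (scannerReadPoints) {
          scannerReadPoints.put(scanner, readPoint);
          return readPoint;
        }
      }

      void unregister(Object scanner) {
        scannerReadPoints.remove(scanner); // close() path: plain removal suffices
      }

      // The smallest registered point bounds which old versions may be discarded.
      long smallestReadPoint(long currentReadPoint) {
        long min = currentReadPoint;
        for (long rp : scannerReadPoints.values()) {
          min = Math.min(min, rp);
        }
        return min;
      }
    }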
@@ -904,8 +896,8 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co Pair<Long, Long> retainedRWRequestsCnt = rsServices.getRegionServerAccounting() .getRetainedRegionRWRequestsCnt().get(getRegionInfo().getEncodedName()); if (retainedRWRequestsCnt != null) { - this.setReadRequestsCount(retainedRWRequestsCnt.getFirst()); - this.setWriteRequestsCount(retainedRWRequestsCnt.getSecond()); + this.addReadRequestsCount(retainedRWRequestsCnt.getFirst()); + this.addWriteRequestsCount(retainedRWRequestsCnt.getSecond()); // remove them since won't use again rsServices.getRegionServerAccounting().getRetainedRegionRWRequestsCnt() .remove(getRegionInfo().getEncodedName()); @@ -3160,12 +3152,12 @@ private RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additional } protected RegionScannerImpl instantiateRegionScanner(Scan scan, - List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce) throws IOException { + List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce) throws IOException { if (scan.isReversed()) { if (scan.getFilter() != null) { scan.getFilter().setReversed(true); } - return new ReversedRegionScannerImpl(scan, additionalScanners, this); + return new ReversedRegionScannerImpl(scan, additionalScanners, this, nonceGroup, nonce); } return new RegionScannerImpl(scan, additionalScanners, this, nonceGroup, nonce); } @@ -4039,7 +4031,7 @@ private List<Cell> reckonDeltasByStore(HStore store, Mutation mutation, long now // Sort the cells so that they match the order that they appear in the Get results. // Otherwise, we won't be able to find the existing values if the cells are not specified // in order by the client since cells are in an array list. - sort(deltas, store.getComparator()); + deltas.sort(store.getComparator()); // Get previous values for all columns in this family. Get get = new Get(mutation.getRow()); @@ -7086,702 +7078,6 @@ public String toString() { return getRegionInfo().getRegionNameAsString(); } - /** - * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
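The setReadRequestsCount to addReadRequestsCount rename that this hunk starts using (and that a later hunk defines) is not cosmetic: the counters are backed by accumulating adders, so the old "set" names promised an overwrite the code never performed. A standalone sketch of the accumulate-only behavior, using java.util.concurrent.atomic.LongAdder as a stand-in for the region's counters:

    import java.util.concurrent.atomic.LongAdder;

    public class CounterNaming {
      public static void main(String[] args) {
        LongAdder readRequestsCount = new LongAdder();
        readRequestsCount.add(5); // retained count carried over on region reopen
        readRequestsCount.add(3); // subsequent traffic
        // Prints 8: every call accumulates; nothing is ever overwritten,
        // which is why "add" is the honest name and "set" was misleading.
        System.out.println(readRequestsCount.sum());
      }
    }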
- */ - class RegionScannerImpl - implements RegionScanner, Shipper, org.apache.hadoop.hbase.ipc.RpcCallback { - // Package local for testability - KeyValueHeap storeHeap = null; - /** Heap of key-values that are not essential for the provided filters and are thus read - * on demand, if on-demand column family loading is enabled.*/ - KeyValueHeap joinedHeap = null; - /** - * If the joined heap data gathering is interrupted due to scan limits, this will - * contain the row for which we are populating the values.*/ - protected Cell joinedContinuationRow = null; - private boolean filterClosed = false; - - protected final byte[] stopRow; - protected final boolean includeStopRow; - protected final HRegion region; - protected final CellComparator comparator; - - private final long readPt; - private final long maxResultSize; - private final ScannerContext defaultScannerContext; - private final FilterWrapper filter; - - @Override - public RegionInfo getRegionInfo() { - return region.getRegionInfo(); - } - - RegionScannerImpl(Scan scan, List additionalScanners, HRegion region) - throws IOException { - this(scan, additionalScanners, region, HConstants.NO_NONCE, HConstants.NO_NONCE); - } - - RegionScannerImpl(Scan scan, List additionalScanners, HRegion region, - long nonceGroup, long nonce) throws IOException { - this.region = region; - this.maxResultSize = scan.getMaxResultSize(); - if (scan.hasFilter()) { - this.filter = new FilterWrapper(scan.getFilter()); - } else { - this.filter = null; - } - this.comparator = region.getCellComparator(); - /** - * By default, calls to next/nextRaw must enforce the batch limit. Thus, construct a default - * scanner context that can be used to enforce the batch limit in the event that a - * ScannerContext is not specified during an invocation of next/nextRaw - */ - defaultScannerContext = ScannerContext.newBuilder() - .setBatchLimit(scan.getBatch()).build(); - this.stopRow = scan.getStopRow(); - this.includeStopRow = scan.includeStopRow(); - - // synchronize on scannerReadPoints so that nobody calculates - // getSmallestReadPoint, before scannerReadPoints is updated. - IsolationLevel isolationLevel = scan.getIsolationLevel(); - long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); - synchronized (scannerReadPoints) { - if (mvccReadPoint > 0) { - this.readPt = mvccReadPoint; - } else if (nonce == HConstants.NO_NONCE || rsServices == null - || rsServices.getNonceManager() == null) { - this.readPt = getReadPoint(isolationLevel); - } else { - this.readPt = rsServices.getNonceManager().getMvccFromOperationContext(nonceGroup, nonce); - } - scannerReadPoints.put(this, this.readPt); - } - initializeScanners(scan, additionalScanners); - } - - protected void initializeScanners(Scan scan, List additionalScanners) - throws IOException { - // Here we separate all scanners into two lists - scanner that provide data required - // by the filter to operate (scanners list) and all others (joinedScanners list). 
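The constructor being removed here (and re-created in the new file below) chooses the scanner's read point from three sources in strict priority: a read point pinned on the Scan itself, then the nonce manager (so a client retry observes the same snapshot as the original attempt), then the isolation level. A compact sketch of that decision ladder under simplified types; the NonceManager interface and helper names are illustrative, not the HBase API:

    final class ReadPointChooser {
      static final long NO_NONCE = 0; // mirrors HConstants.NO_NONCE

      interface NonceManager {
        long mvccFromOperationContext(long nonceGroup, long nonce);
      }

      // Priority: explicit scan read point > nonce-associated snapshot > isolation level.
      static long choose(long scanMvccReadPoint, long nonceGroup, long nonce,
          NonceManager nonceManager, long readCommittedPoint, long readUncommittedPoint,
          boolean readCommitted) {
        if (scanMvccReadPoint > 0) {
          return scanMvccReadPoint; // the caller pinned a snapshot on the Scan
        }
        if (nonce != NO_NONCE && nonceManager != null) {
          // a retried operation must observe the snapshot of the original attempt
          return nonceManager.mvccFromOperationContext(nonceGroup, nonce);
        }
        return readCommitted ? readCommittedPoint : readUncommittedPoint;
      }
    }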
- List scanners = new ArrayList<>(scan.getFamilyMap().size()); - List joinedScanners = new ArrayList<>(scan.getFamilyMap().size()); - // Store all already instantiated scanners for exception handling - List instantiatedScanners = new ArrayList<>(); - // handle additionalScanners - if (additionalScanners != null && !additionalScanners.isEmpty()) { - scanners.addAll(additionalScanners); - instantiatedScanners.addAll(additionalScanners); - } - - try { - for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { - HStore store = stores.get(entry.getKey()); - KeyValueScanner scanner = store.getScanner(scan, entry.getValue(), this.readPt); - instantiatedScanners.add(scanner); - if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand() - || this.filter.isFamilyEssential(entry.getKey())) { - scanners.add(scanner); - } else { - joinedScanners.add(scanner); - } - } - initializeKVHeap(scanners, joinedScanners, region); - } catch (Throwable t) { - throw handleException(instantiatedScanners, t); - } - } - - protected void initializeKVHeap(List scanners, - List joinedScanners, HRegion region) - throws IOException { - this.storeHeap = new KeyValueHeap(scanners, comparator); - if (!joinedScanners.isEmpty()) { - this.joinedHeap = new KeyValueHeap(joinedScanners, comparator); - } - } - - private IOException handleException(List instantiatedScanners, - Throwable t) { - // remove scaner read point before throw the exception - scannerReadPoints.remove(this); - if (storeHeap != null) { - storeHeap.close(); - storeHeap = null; - if (joinedHeap != null) { - joinedHeap.close(); - joinedHeap = null; - } - } else { - // close all already instantiated scanners before throwing the exception - for (KeyValueScanner scanner : instantiatedScanners) { - scanner.close(); - } - } - return t instanceof IOException ? (IOException) t : new IOException(t); - } - - @Override - public long getMaxResultSize() { - return maxResultSize; - } - - @Override - public long getMvccReadPoint() { - return this.readPt; - } - - @Override - public int getBatch() { - return this.defaultScannerContext.getBatchLimit(); - } - - /** - * Reset both the filter and the old filter. - * - * @throws IOException in case a filter raises an I/O exception. - */ - protected void resetFilters() throws IOException { - if (filter != null) { - filter.reset(); - } - } - - @Override - public boolean next(List outResults) - throws IOException { - // apply the batching limit by default - return next(outResults, defaultScannerContext); - } - - @Override - public synchronized boolean next(List outResults, ScannerContext scannerContext) - throws IOException { - if (this.filterClosed) { - throw new UnknownScannerException("Scanner was closed (timed out?) " + - "after we renewed it. Could be caused by a very slow scanner " + - "or a lengthy garbage collection"); - } - startRegionOperation(Operation.SCAN); - try { - return nextRaw(outResults, scannerContext); - } finally { - closeRegionOperation(Operation.SCAN); - } - } - - @Override - public boolean nextRaw(List outResults) throws IOException { - // Use the RegionScanner's context by default - return nextRaw(outResults, defaultScannerContext); - } - - @Override - public boolean nextRaw(List outResults, ScannerContext scannerContext) - throws IOException { - if (storeHeap == null) { - // scanner is closed - throw new UnknownScannerException("Scanner was closed"); - } - boolean moreValues = false; - if (outResults.isEmpty()) { - // Usually outResults is empty. 
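handleException above encodes a pattern worth calling out: scanner construction acquires resources one at a time, so on failure it must close either the assembled heaps or every scanner instantiated so far, and in both cases unregister the read point. A self-contained sketch of the same acquire-all-or-release-all shape (the interface names are illustrative):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    final class SafeInit {
      interface Scanner {
        void close();
      }

      interface ScannerFactory {
        Scanner open() throws IOException;
      }

      // Track every scanner the moment it exists, so a failure part-way through
      // construction can release everything acquired so far before rethrowing.
      static List<Scanner> openAll(List<ScannerFactory> factories) throws IOException {
        List<Scanner> opened = new ArrayList<>();
        try {
          for (ScannerFactory f : factories) {
            opened.add(f.open());
          }
          return opened;
        } catch (Throwable t) {
          for (Scanner s : opened) {
            s.close(); // undo partial work, mirroring handleException above
          }
          throw t instanceof IOException ? (IOException) t : new IOException(t);
        }
      }
    }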
This is true when next is called - // to handle scan or get operation. - moreValues = nextInternal(outResults, scannerContext); - } else { - List tmpList = new ArrayList<>(); - moreValues = nextInternal(tmpList, scannerContext); - outResults.addAll(tmpList); - } - - if (!outResults.isEmpty()) { - readRequestsCount.increment(); - if (metricsRegion != null) { - metricsRegion.updateReadRequestCount(); - } - } - if (rsServices != null && rsServices.getMetrics() != null) { - rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable()); - } - - // If the size limit was reached it means a partial Result is being returned. Returning a - // partial Result means that we should not reset the filters; filters should only be reset in - // between rows - if (!scannerContext.mayHaveMoreCellsInRow()) { - resetFilters(); - } - - if (isFilterDoneInternal()) { - moreValues = false; - } - return moreValues; - } - - /** - * @return true if more cells exist after this batch, false if scanner is done - */ - private boolean populateFromJoinedHeap(List results, ScannerContext scannerContext) - throws IOException { - assert joinedContinuationRow != null; - boolean moreValues = populateResult(results, this.joinedHeap, scannerContext, - joinedContinuationRow); - - if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - // We are done with this row, reset the continuation. - joinedContinuationRow = null; - } - // As the data is obtained from two independent heaps, we need to - // ensure that result list is sorted, because Result relies on that. - sort(results, comparator); - return moreValues; - } - - /** - * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is - * reached, or remainingResultSize (if not -1) is reaced - * @param heap KeyValueHeap to fetch data from.It must be positioned on correct row before call. - * @param scannerContext - * @param currentRowCell - * @return state of last call to {@link KeyValueHeap#next()} - */ - private boolean populateResult(List results, KeyValueHeap heap, - ScannerContext scannerContext, Cell currentRowCell) throws IOException { - Cell nextKv; - boolean moreCellsInRow = false; - boolean tmpKeepProgress = scannerContext.getKeepProgress(); - // Scanning between column families and thus the scope is between cells - LimitScope limitScope = LimitScope.BETWEEN_CELLS; - do { - // Check for thread interrupt status in case we have been signaled from - // #interruptRegionOperation. - checkInterrupt(); - - // We want to maintain any progress that is made towards the limits while scanning across - // different column families. To do this, we toggle the keep progress flag on during calls - // to the StoreScanner to ensure that any progress made thus far is not wiped away. - scannerContext.setKeepProgress(true); - heap.next(results, scannerContext); - scannerContext.setKeepProgress(tmpKeepProgress); - - nextKv = heap.peek(); - moreCellsInRow = moreCellsInRow(nextKv, currentRowCell); - if (!moreCellsInRow) incrementCountOfRowsScannedMetric(scannerContext); - if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) { - return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); - } else if (scannerContext.checkSizeLimit(limitScope)) { - ScannerContext.NextState state = - moreCellsInRow ? 
NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED; - return scannerContext.setScannerState(state).hasMoreValues(); - } else if (scannerContext.checkTimeLimit(limitScope)) { - ScannerContext.NextState state = - moreCellsInRow ? NextState.TIME_LIMIT_REACHED_MID_ROW : NextState.TIME_LIMIT_REACHED; - return scannerContext.setScannerState(state).hasMoreValues(); - } - } while (moreCellsInRow); - return nextKv != null; - } - - /** - * Based on the nextKv in the heap, and the current row, decide whether or not there are more - * cells to be read in the heap. If the row of the nextKv in the heap matches the current row - * then there are more cells to be read in the row. - * @param nextKv - * @param currentRowCell - * @return true When there are more cells in the row to be read - */ - private boolean moreCellsInRow(final Cell nextKv, Cell currentRowCell) { - return nextKv != null && CellUtil.matchingRows(nextKv, currentRowCell); - } - - /* - * @return True if a filter rules the scanner is over, done. - */ - @Override - public synchronized boolean isFilterDone() throws IOException { - return isFilterDoneInternal(); - } - - private boolean isFilterDoneInternal() throws IOException { - return this.filter != null && this.filter.filterAllRemaining(); - } - - private boolean nextInternal(List results, ScannerContext scannerContext) - throws IOException { - if (!results.isEmpty()) { - throw new IllegalArgumentException("First parameter should be an empty list"); - } - if (scannerContext == null) { - throw new IllegalArgumentException("Scanner context cannot be null"); - } - Optional rpcCall = RpcServer.getCurrentCall(); - - // Save the initial progress from the Scanner context in these local variables. The progress - // may need to be reset a few times if rows are being filtered out so we save the initial - // progress. - int initialBatchProgress = scannerContext.getBatchProgress(); - long initialSizeProgress = scannerContext.getDataSizeProgress(); - long initialHeapSizeProgress = scannerContext.getHeapSizeProgress(); - - // Used to check time limit - LimitScope limitScope = LimitScope.BETWEEN_CELLS; - - // The loop here is used only when at some point during the next we determine - // that due to effects of filters or otherwise, we have an empty row in the result. - // Then we loop and try again. Otherwise, we must get out on the first iteration via return, - // "true" if there's more data to read, "false" if there isn't (storeHeap is at a stop row, - // and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow). - while (true) { - // Starting to scan a new row. Reset the scanner progress according to whether or not - // progress should be kept. - if (scannerContext.getKeepProgress()) { - // Progress should be kept. Reset to initial values seen at start of method invocation. - scannerContext.setProgress(initialBatchProgress, initialSizeProgress, - initialHeapSizeProgress); - } else { - scannerContext.clearProgress(); - } - if (rpcCall.isPresent()) { - // If a user specifies a too-restrictive or too-slow scanner, the - // client might time out and disconnect while the server side - // is still processing the request. We should abort aggressively - // in that case. 
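The _MID_ROW limit states above carry the information the RPC layer needs to ship partial results safely: a plain SIZE_LIMIT_REACHED or TIME_LIMIT_REACHED means the row boundary was clean, while the MID_ROW variants mean the current row is incomplete and per-row filter state must not be reset yet. A simplified model of the distinction (not the real ScannerContext.NextState):

    final class LimitStates {
      enum NextState {
        MORE_VALUES, NO_MORE_VALUES,
        SIZE_LIMIT_REACHED, SIZE_LIMIT_REACHED_MID_ROW,
        TIME_LIMIT_REACHED, TIME_LIMIT_REACHED_MID_ROW;

        // Only the MID_ROW variants mean "this row is incomplete": the caller
        // must ship a partial Result and must not reset per-row filter state.
        boolean midRow() {
          return this == SIZE_LIMIT_REACHED_MID_ROW || this == TIME_LIMIT_REACHED_MID_ROW;
        }
      }

      // moreCellsInRow decides which flavor of "size limit reached" to report.
      static NextState onSizeLimit(boolean moreCellsInRow) {
        return moreCellsInRow ? NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED;
      }
    }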
- long afterTime = rpcCall.get().disconnectSince(); - if (afterTime >= 0) { - throw new CallerDisconnectedException( - "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + - this + " after " + afterTime + " ms, since " + - "caller disconnected"); - } - } - - // Check for thread interrupt status in case we have been signaled from - // #interruptRegionOperation. - checkInterrupt(); - - // Let's see what we have in the storeHeap. - Cell current = this.storeHeap.peek(); - - boolean shouldStop = shouldStop(current); - // When has filter row is true it means that the all the cells for a particular row must be - // read before a filtering decision can be made. This means that filters where hasFilterRow - // run the risk of enLongAddering out of memory errors in the case that they are applied to a - // table that has very large rows. - boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); - - // If filter#hasFilterRow is true, partial results are not allowed since allowing them - // would prevent the filters from being evaluated. Thus, if it is true, change the - // scope of any limits that could potentially create partial results to - // LimitScope.BETWEEN_ROWS so that those limits are not reached mid-row - if (hasFilterRow) { - if (LOG.isTraceEnabled()) { - LOG.trace("filter#hasFilterRow is true which prevents partial results from being " - + " formed. Changing scope of limits that may create partials"); - } - scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS); - scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS); - limitScope = LimitScope.BETWEEN_ROWS; - } - - if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { - if (hasFilterRow) { - throw new IncompatibleFilterException( - "Filter whose hasFilterRow() returns true is incompatible with scans that must " + - " stop mid-row because of a limit. ScannerContext:" + scannerContext); - } - return true; - } - - // Check if we were getting data from the joinedHeap and hit the limit. - // If not, then it's main path - getting results from storeHeap. - if (joinedContinuationRow == null) { - // First, check if we are at a stop row. If so, there are no more results. - if (shouldStop) { - if (hasFilterRow) { - filter.filterRowCells(results); - } - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // Check if rowkey filter wants to exclude this row. If so, loop to next. - // Technically, if we hit limits before on this row, we don't need this call. - if (filterRowKey(current)) { - incrementCountOfRowsFilteredMetric(scannerContext); - // early check, see HBASE-16296 - if (isFilterDoneInternal()) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - // Typically the count of rows scanned is incremented inside #populateResult. However, - // here we are filtering a row based purely on its row key, preventing us from calling - // #populateResult. Thus, perform the necessary increment here to rows scanned metric - incrementCountOfRowsScannedMetric(scannerContext); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - results.clear(); - - // Read nothing as the rowkey was filtered, but still need to check time limit - if (scannerContext.checkTimeLimit(limitScope)) { - return true; - } - continue; - } - - // Ok, we are good, let's try to get some results from the main heap. 
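The scope widening above is subtle: when a filter implements hasFilterRow(), it must see the whole row before deciding, so any limit that could fire between cells is promoted to fire only between rows, and tripping a cell-scoped limit anyway is reported as IncompatibleFilterException. A toy model of the scope promotion (the enum and methods are illustrative, not the HBase types):

    final class ScopeDemo {
      enum LimitScope {
        BETWEEN_CELLS(0), BETWEEN_ROWS(1);

        private final int depth;

        LimitScope(int depth) {
          this.depth = depth;
        }

        // A limit configured at BETWEEN_ROWS cannot be enforced from a
        // BETWEEN_CELLS checkpoint, so widening the configured scope defers
        // enforcement to row boundaries.
        boolean canEnforceFrom(LimitScope checkerScope) {
          return checkerScope.depth >= this.depth;
        }
      }

      // With a row-level filter present, cell-scoped limits are promoted.
      static LimitScope effectiveScope(boolean hasFilterRow, LimitScope configured) {
        return hasFilterRow ? LimitScope.BETWEEN_ROWS : configured;
      }
    }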
- populateResult(results, this.storeHeap, scannerContext, current); - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - if (hasFilterRow) { - throw new IncompatibleFilterException( - "Filter whose hasFilterRow() returns true is incompatible with scans that must " - + " stop mid-row because of a limit. ScannerContext:" + scannerContext); - } - return true; - } - - // Check for thread interrupt status in case we have been signaled from - // #interruptRegionOperation. - checkInterrupt(); - - Cell nextKv = this.storeHeap.peek(); - shouldStop = shouldStop(nextKv); - // save that the row was empty before filters applied to it. - final boolean isEmptyRow = results.isEmpty(); - - // We have the part of the row necessary for filtering (all of it, usually). - // First filter with the filterRow(List). - FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; - if (hasFilterRow) { - ret = filter.filterRowCellsWithRet(results); - - // We don't know how the results have changed after being filtered. Must set progress - // according to contents of results now. - if (scannerContext.getKeepProgress()) { - scannerContext.setProgress(initialBatchProgress, initialSizeProgress, - initialHeapSizeProgress); - } else { - scannerContext.clearProgress(); - } - scannerContext.incrementBatchProgress(results.size()); - for (Cell cell : results) { - scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), - cell.heapSize()); - } - } - - if (isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE || filterRow()) { - incrementCountOfRowsFilteredMetric(scannerContext); - results.clear(); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // This row was totally filtered out, if this is NOT the last row, - // we should continue on. Otherwise, nothing else to do. - if (!shouldStop) { - // Read nothing as the cells was filtered, but still need to check time limit - if (scannerContext.checkTimeLimit(limitScope)) { - return true; - } - continue; - } - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - - // Ok, we are done with storeHeap for this row. - // Now we may need to fetch additional, non-essential data into row. - // These values are not needed for filter to work, so we postpone their - // fetch to (possibly) reduce amount of data loads from disk. - if (this.joinedHeap != null) { - boolean mayHaveData = joinedHeapMayHaveData(current); - if (mayHaveData) { - joinedContinuationRow = current; - populateFromJoinedHeap(results, scannerContext); - - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - return true; - } - } - } - } else { - // Populating from the joined heap was stopped by limits, populate some more. - populateFromJoinedHeap(results, scannerContext); - if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { - return true; - } - } - // We may have just called populateFromJoinedMap and hit the limits. If that is - // the case, we need to call it again on the next next() invocation. - if (joinedContinuationRow != null) { - return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); - } - - // Finally, we are done with both joinedHeap and storeHeap. - // Double check to prevent empty rows from appearing in result. It could be - // the case when SingleColumnValueExcludeFilter is used. 
- if (results.isEmpty()) { - incrementCountOfRowsFilteredMetric(scannerContext); - boolean moreRows = nextRow(scannerContext, current); - if (!moreRows) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } - if (!shouldStop) continue; - } - - if (shouldStop) { - return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); - } else { - return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); - } - } - } - - protected void incrementCountOfRowsFilteredMetric(ScannerContext scannerContext) { - filteredReadRequestsCount.increment(); - if (metricsRegion != null) { - metricsRegion.updateFilteredRecords(); - } - - if (scannerContext == null || !scannerContext.isTrackingMetrics()) return; - - scannerContext.getMetrics().countOfRowsFiltered.incrementAndGet(); - } - - protected void incrementCountOfRowsScannedMetric(ScannerContext scannerContext) { - if (scannerContext == null || !scannerContext.isTrackingMetrics()) return; - - scannerContext.getMetrics().countOfRowsScanned.incrementAndGet(); - } - - /** - * @param currentRowCell - * @return true when the joined heap may have data for the current row - * @throws IOException - */ - private boolean joinedHeapMayHaveData(Cell currentRowCell) - throws IOException { - Cell nextJoinedKv = joinedHeap.peek(); - boolean matchCurrentRow = - nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); - boolean matchAfterSeek = false; - - // If the next value in the joined heap does not match the current row, try to seek to the - // correct row - if (!matchCurrentRow) { - Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); - boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); - matchAfterSeek = - seekSuccessful && joinedHeap.peek() != null - && CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); - } - - return matchCurrentRow || matchAfterSeek; - } - - /** - * This function is to maintain backward compatibility for 0.94 filters. HBASE-6429 combines - * both filterRow & filterRow({@code List kvs}) functions. While 0.94 code or older, - * it may not implement hasFilterRow as HBase-6429 expects because 0.94 hasFilterRow() only - * returns true when filterRow({@code List kvs}) is overridden not the filterRow(). - * Therefore, the filterRow() will be skipped. - */ - private boolean filterRow() throws IOException { - // when hasFilterRow returns true, filter.filterRow() will be called automatically inside - // filterRowCells(List kvs) so we skip that scenario here. - return filter != null && (!filter.hasFilterRow()) - && filter.filterRow(); - } - - private boolean filterRowKey(Cell current) throws IOException { - return filter != null && filter.filterRowKey(current); - } - - protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { - assert this.joinedContinuationRow == null: "Trying to go to next row during joinedHeap read."; - Cell next; - while ((next = this.storeHeap.peek()) != null && - CellUtil.matchingRows(next, curRowCell)) { - // Check for thread interrupt status in case we have been signaled from - // #interruptRegionOperation. 
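joinedHeapMayHaveData above avoids needless seeks on the lazily loaded heap: if its top cell is already on the current row the data is there; otherwise it issues a single requestSeek to the first possible cell of the row and re-checks the top. A reduced sketch of the check, with the heap and cell interfaces cut down to what the logic needs (illustrative, not the HBase API):

    import java.io.IOException;
    import java.util.Arrays;

    final class JoinedHeapCheck {
      interface Cell {
        byte[] row();
      }

      interface Heap {
        Cell peek();
        boolean requestSeek(Cell key, boolean forward, boolean useBloom) throws IOException;
      }

      // The joined heap holds only non-essential families and may still be
      // parked on an earlier row: check the top first, seek only if needed.
      static boolean mayHaveData(Heap joined, Cell firstOnCurrentRow, byte[] currentRow)
          throws IOException {
        Cell top = joined.peek();
        if (top != null && Arrays.equals(top.row(), currentRow)) {
          return true; // already positioned on the row being populated
        }
        // One lazy seek (in the spirit of HBASE-5520), then re-check the top.
        return joined.requestSeek(firstOnCurrentRow, true, true)
            && joined.peek() != null
            && Arrays.equals(joined.peek().row(), currentRow);
      }
    }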
- checkInterrupt(); - this.storeHeap.next(MOCKED_LIST); - } - resetFilters(); - - // Calling the hook in CP which allows it to do a fast forward - return this.region.getCoprocessorHost() == null - || this.region.getCoprocessorHost() - .postScannerFilterRow(this, curRowCell); - } - - protected boolean shouldStop(Cell currentRowCell) { - if (currentRowCell == null) { - return true; - } - if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_END_ROW)) { - return false; - } - int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length); - return c > 0 || (c == 0 && !includeStopRow); - } - - @Override - public synchronized void close() { - if (storeHeap != null) { - storeHeap.close(); - storeHeap = null; - } - if (joinedHeap != null) { - joinedHeap.close(); - joinedHeap = null; - } - // no need to synchronize here. - scannerReadPoints.remove(this); - this.filterClosed = true; - } - - KeyValueHeap getStoreHeapForTesting() { - return storeHeap; - } - - @Override - public synchronized boolean reseek(byte[] row) throws IOException { - if (row == null) { - throw new IllegalArgumentException("Row cannot be null."); - } - boolean result = false; - startRegionOperation(); - Cell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); - try { - // use request seek to make use of the lazy seek option. See HBASE-5520 - result = this.storeHeap.requestSeek(kv, true, true); - if (this.joinedHeap != null) { - result = this.joinedHeap.requestSeek(kv, true, true) || result; - } - } finally { - closeRegionOperation(); - } - return result; - } - - @Override - public void shipped() throws IOException { - if (storeHeap != null) { - storeHeap.shipped(); - } - if (joinedHeap != null) { - joinedHeap.shipped(); - } - } - - @Override - public void run() throws IOException { - // This is the RPC callback method executed. We do the close of the scanner in this - // callback - this.close(); - } - } - // Utility methods /** * A utility method to create new instances of HRegion based on the {@link HConstants#REGION_IMPL} @@ -8661,14 +7957,6 @@ private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, List<UUID> clusterIds, - /** - * Utility method to sort the cells in <code>cells</code> using <code>comparator</code> - */ - private static List<Cell> sort(List<Cell> cells, final CellComparator comparator) { - cells.sort(comparator); - return cells; - } - public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HRegion.class, false); // woefully out of date - currently missing: @@ -9067,32 +8355,6 @@ private boolean shouldSyncWAL() { return regionDurability.ordinal() > Durability.ASYNC_WAL.ordinal(); } - /** - * A mocked list implementation - discards all updates.
- */ - private static final List<Cell> MOCKED_LIST = new AbstractList<Cell>() { - - @Override - public void add(int index, Cell element) { - // do nothing - } - - @Override - public boolean addAll(int index, Collection<? extends Cell> c) { - return false; // this list is never changed as a result of an update - } - - @Override - public KeyValue get(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public int size() { - return 0; - } - }; - /** @return the latest sequence number that was read from storage when this region was opened */ public long getOpenSeqNum() { return this.openSeqNum; @@ -9340,11 +8602,11 @@ private static void decorateRegionConfiguration(Configuration conf) { } } - public void setReadRequestsCount(long readRequestsCount) { + public void addReadRequestsCount(long readRequestsCount) { this.readRequestsCount.add(readRequestsCount); } - public void setWriteRequestsCount(long writeRequestsCount) { + public void addWriteRequestsCount(long writeRequestsCount) { this.writeRequestsCount.add(writeRequestsCount); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index f8323c6a1164..587919dac6d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -120,7 +120,6 @@ import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.LeaseManager.Lease; import org.apache.hadoop.hbase.regionserver.LeaseManager.LeaseStillHeldException; import org.apache.hadoop.hbase.regionserver.Region.Operation; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java new file mode 100644 index 000000000000..5d81687cbf45 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -0,0 +1,782 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
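The MOCKED_LIST just removed (and recreated verbatim near the end of the new file) is a write-only List: nextRow drains the remainder of a filtered row through KeyValueHeap.next(MOCKED_LIST), so the heap advances past cells that are about to be discarded without buffering them. A standalone illustration of the trick on String elements:

    import java.util.AbstractList;
    import java.util.Collection;
    import java.util.List;

    final class DiscardingSink {
      // A write-only List: accepts adds but stores nothing. Handing it to a
      // producer advances the producer without accumulating its output.
      static final List<String> DISCARD = new AbstractList<String>() {
        @Override
        public void add(int index, String element) {
          // drop the element
        }

        @Override
        public boolean addAll(int index, Collection<? extends String> c) {
          return false; // never changed as a result of an update
        }

        @Override
        public String get(int index) {
          throw new UnsupportedOperationException();
        }

        @Override
        public int size() {
          return 0;
        }
      };

      public static void main(String[] args) {
        DISCARD.add("skipped cell"); // accepted, immediately forgotten
        System.out.println(DISCARD.size()); // prints 0
      }
    }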
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.IsolationLevel; +import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.FilterWrapper; +import org.apache.hadoop.hbase.filter.IncompatibleFilterException; +import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcCallback; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.regionserver.Region.Operation; +import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +/** + * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families). + */ +@InterfaceAudience.Private +class RegionScannerImpl implements RegionScanner, Shipper, RpcCallback { + + private static final Logger LOG = LoggerFactory.getLogger(RegionScannerImpl.class); + + // Package local for testability + KeyValueHeap storeHeap = null; + + /** + * Heap of key-values that are not essential for the provided filters and are thus read on demand, + * if on-demand column family loading is enabled. + */ + KeyValueHeap joinedHeap = null; + + /** + * If the joined heap data gathering is interrupted due to scan limits, this will contain the row + * for which we are populating the values. 
+ */ + protected Cell joinedContinuationRow = null; + private boolean filterClosed = false; + + protected final byte[] stopRow; + protected final boolean includeStopRow; + protected final HRegion region; + protected final CellComparator comparator; + + private final ConcurrentHashMap scannerReadPoints; + + private final long readPt; + private final long maxResultSize; + private final ScannerContext defaultScannerContext; + private final FilterWrapper filter; + + private RegionServerServices rsServices; + + @Override + public RegionInfo getRegionInfo() { + return region.getRegionInfo(); + } + + private static boolean hasNonce(HRegion region, long nonce) { + RegionServerServices rsServices = region.getRegionServerServices(); + return nonce != HConstants.NO_NONCE && rsServices != null && + rsServices.getNonceManager() != null; + } + + RegionScannerImpl(Scan scan, List additionalScanners, HRegion region, + long nonceGroup, long nonce) throws IOException { + this.region = region; + this.maxResultSize = scan.getMaxResultSize(); + if (scan.hasFilter()) { + this.filter = new FilterWrapper(scan.getFilter()); + } else { + this.filter = null; + } + this.comparator = region.getCellComparator(); + /** + * By default, calls to next/nextRaw must enforce the batch limit. Thus, construct a default + * scanner context that can be used to enforce the batch limit in the event that a + * ScannerContext is not specified during an invocation of next/nextRaw + */ + defaultScannerContext = ScannerContext.newBuilder().setBatchLimit(scan.getBatch()).build(); + this.stopRow = scan.getStopRow(); + this.includeStopRow = scan.includeStopRow(); + + // synchronize on scannerReadPoints so that nobody calculates + // getSmallestReadPoint, before scannerReadPoints is updated. + IsolationLevel isolationLevel = scan.getIsolationLevel(); + long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); + this.scannerReadPoints = region.scannerReadPoints; + this.rsServices = region.getRegionServerServices(); + synchronized (scannerReadPoints) { + if (mvccReadPoint > 0) { + this.readPt = mvccReadPoint; + } else if (hasNonce(region, nonce)) { + this.readPt = rsServices.getNonceManager().getMvccFromOperationContext(nonceGroup, nonce); + } else { + this.readPt = region.getReadPoint(isolationLevel); + } + scannerReadPoints.put(this, this.readPt); + } + initializeScanners(scan, additionalScanners); + } + + private void initializeScanners(Scan scan, List additionalScanners) + throws IOException { + // Here we separate all scanners into two lists - scanner that provide data required + // by the filter to operate (scanners list) and all others (joinedScanners list). 
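The partition logic that follows splits the store scanners into an essential list and a joined list: a family is essential when there is no filter, when on-demand column family loading is off, or when the filter declares the family essential; everything else is deferred until a row actually passes the filter. A minimal sketch of the partition, with the three-way condition folded into one flag plus a predicate (names are illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Predicate;

    final class FamilyPartition {
      // Essential families feed the filter and are scanned eagerly; joined
      // families are read only after a row passes the filter.
      static void partition(List<byte[]> families, boolean onDemandLoading,
          Predicate<byte[]> familyEssential, List<byte[]> essential, List<byte[]> joined) {
        for (byte[] family : families) {
          if (!onDemandLoading || familyEssential.test(family)) {
            essential.add(family);
          } else {
            joined.add(family);
          }
        }
      }

      public static void main(String[] args) {
        List<byte[]> essential = new ArrayList<>();
        List<byte[]> joined = new ArrayList<>();
        // onDemandLoading stands for "a filter exists and the scan allows
        // on-demand column family loading"; only cf1 is essential to it.
        partition(Arrays.asList("cf1".getBytes(), "cf2".getBytes()), true,
            f -> Arrays.equals(f, "cf1".getBytes()), essential, joined);
        System.out.println(essential.size() + " essential, " + joined.size() + " joined");
      }
    }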
+ List<KeyValueScanner> scanners = new ArrayList<>(scan.getFamilyMap().size()); + List<KeyValueScanner> joinedScanners = new ArrayList<>(scan.getFamilyMap().size()); + // Store all already instantiated scanners for exception handling + List<KeyValueScanner> instantiatedScanners = new ArrayList<>(); + // handle additionalScanners + if (additionalScanners != null && !additionalScanners.isEmpty()) { + scanners.addAll(additionalScanners); + instantiatedScanners.addAll(additionalScanners); + } + + try { + for (Map.Entry<byte[], NavigableSet<byte[]>> entry : scan.getFamilyMap().entrySet()) { + HStore store = region.getStore(entry.getKey()); + KeyValueScanner scanner = store.getScanner(scan, entry.getValue(), this.readPt); + instantiatedScanners.add(scanner); + if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand() || + this.filter.isFamilyEssential(entry.getKey())) { + scanners.add(scanner); + } else { + joinedScanners.add(scanner); + } + } + initializeKVHeap(scanners, joinedScanners, region); + } catch (Throwable t) { + throw handleException(instantiatedScanners, t); + } + } + + protected void initializeKVHeap(List<KeyValueScanner> scanners, + List<KeyValueScanner> joinedScanners, HRegion region) throws IOException { + this.storeHeap = new KeyValueHeap(scanners, comparator); + if (!joinedScanners.isEmpty()) { + this.joinedHeap = new KeyValueHeap(joinedScanners, comparator); + } + } + + private IOException handleException(List<KeyValueScanner> instantiatedScanners, Throwable t) { + // remove scanner read point before throwing the exception + scannerReadPoints.remove(this); + if (storeHeap != null) { + storeHeap.close(); + storeHeap = null; + if (joinedHeap != null) { + joinedHeap.close(); + joinedHeap = null; + } + } else { + // close all already instantiated scanners before throwing the exception + for (KeyValueScanner scanner : instantiatedScanners) { + scanner.close(); + } + } + return t instanceof IOException ? (IOException) t : new IOException(t); + } + + @Override + public long getMaxResultSize() { + return maxResultSize; + } + + @Override + public long getMvccReadPoint() { + return this.readPt; + } + + @Override + public int getBatch() { + return this.defaultScannerContext.getBatchLimit(); + } + + /** + * Reset both the filter and the old filter. + * @throws IOException in case a filter raises an I/O exception. + */ + protected final void resetFilters() throws IOException { + if (filter != null) { + filter.reset(); + } + } + + @Override + public boolean next(List<Cell> outResults) throws IOException { + // apply the batching limit by default + return next(outResults, defaultScannerContext); + } + + @Override + public synchronized boolean next(List<Cell> outResults, ScannerContext scannerContext) + throws IOException { + if (this.filterClosed) { + throw new UnknownScannerException("Scanner was closed (timed out?) " + + "after we renewed it. Could be caused by a very slow scanner " + + "or a lengthy garbage collection"); + } + region.startRegionOperation(Operation.SCAN); + try { + return nextRaw(outResults, scannerContext); + } finally { + region.closeRegionOperation(Operation.SCAN); + } + } + + @Override + public boolean nextRaw(List<Cell> outResults) throws IOException { + // Use the RegionScanner's context by default + return nextRaw(outResults, defaultScannerContext); + } + + @Override + public boolean nextRaw(List<Cell> outResults, ScannerContext scannerContext) throws IOException { + if (storeHeap == null) { + // scanner is closed + throw new UnknownScannerException("Scanner was closed"); + } + boolean moreValues = false; + if (outResults.isEmpty()) { + // Usually outResults is empty.
This is true when next is called + // to handle scan or get operation. + moreValues = nextInternal(outResults, scannerContext); + } else { + List<Cell> tmpList = new ArrayList<>(); + moreValues = nextInternal(tmpList, scannerContext); + outResults.addAll(tmpList); + } + + if (!outResults.isEmpty()) { + region.addReadRequestsCount(1); + if (region.getMetrics() != null) { + region.getMetrics().updateReadRequestCount(); + } + } + if (rsServices != null && rsServices.getMetrics() != null) { + rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable()); + } + + // If the size limit was reached it means a partial Result is being returned. Returning a + // partial Result means that we should not reset the filters; filters should only be reset in + // between rows + if (!scannerContext.mayHaveMoreCellsInRow()) { + resetFilters(); + } + + if (isFilterDoneInternal()) { + moreValues = false; + } + return moreValues; + } + + /** + * @return true if more cells exist after this batch, false if scanner is done + */ + private boolean populateFromJoinedHeap(List<Cell> results, ScannerContext scannerContext) + throws IOException { + assert joinedContinuationRow != null; + boolean moreValues = + populateResult(results, this.joinedHeap, scannerContext, joinedContinuationRow); + + if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + // We are done with this row, reset the continuation. + joinedContinuationRow = null; + } + // As the data is obtained from two independent heaps, we need to + // ensure that result list is sorted, because Result relies on that. + results.sort(comparator); + return moreValues; + } + + /** + * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is + * reached, or remainingResultSize (if not -1) is reached + * @param heap KeyValueHeap to fetch data from. It must be positioned on correct row before call. + * @return state of last call to {@link KeyValueHeap#next()} + */ + private boolean populateResult(List<Cell> results, KeyValueHeap heap, + ScannerContext scannerContext, Cell currentRowCell) throws IOException { + Cell nextKv; + boolean moreCellsInRow = false; + boolean tmpKeepProgress = scannerContext.getKeepProgress(); + // Scanning between column families and thus the scope is between cells + LimitScope limitScope = LimitScope.BETWEEN_CELLS; + do { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + // We want to maintain any progress that is made towards the limits while scanning across + // different column families. To do this, we toggle the keep progress flag on during calls + // to the StoreScanner to ensure that any progress made thus far is not wiped away. + scannerContext.setKeepProgress(true); + heap.next(results, scannerContext); + scannerContext.setKeepProgress(tmpKeepProgress); + + nextKv = heap.peek(); + moreCellsInRow = moreCellsInRow(nextKv, currentRowCell); + if (!moreCellsInRow) { + incrementCountOfRowsScannedMetric(scannerContext); + } + if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) { + return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); + } else if (scannerContext.checkSizeLimit(limitScope)) { + ScannerContext.NextState state = + moreCellsInRow ?
NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED; + return scannerContext.setScannerState(state).hasMoreValues(); + } else if (scannerContext.checkTimeLimit(limitScope)) { + ScannerContext.NextState state = + moreCellsInRow ? NextState.TIME_LIMIT_REACHED_MID_ROW : NextState.TIME_LIMIT_REACHED; + return scannerContext.setScannerState(state).hasMoreValues(); + } + } while (moreCellsInRow); + return nextKv != null; + } + + /** + * Based on the nextKv in the heap, and the current row, decide whether or not there are more + * cells to be read in the heap. If the row of the nextKv in the heap matches the current row then + * there are more cells to be read in the row. + * @return true When there are more cells in the row to be read + */ + private boolean moreCellsInRow(final Cell nextKv, Cell currentRowCell) { + return nextKv != null && CellUtil.matchingRows(nextKv, currentRowCell); + } + + /** + * @return True if a filter rules the scanner is over, done. + */ + @Override + public synchronized boolean isFilterDone() throws IOException { + return isFilterDoneInternal(); + } + + private boolean isFilterDoneInternal() throws IOException { + return this.filter != null && this.filter.filterAllRemaining(); + } + + private void checkClientDisconnect(Optional rpcCall) throws CallerDisconnectedException { + if (rpcCall.isPresent()) { + // If a user specifies a too-restrictive or too-slow scanner, the + // client might time out and disconnect while the server side + // is still processing the request. We should abort aggressively + // in that case. + long afterTime = rpcCall.get().disconnectSince(); + if (afterTime >= 0) { + throw new CallerDisconnectedException( + "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + this + + " after " + afterTime + " ms, since " + "caller disconnected"); + } + } + } + + private void resetProgress(ScannerContext scannerContext, int initialBatchProgress, + long initialSizeProgress, long initialHeapSizeProgress) { + // Starting to scan a new row. Reset the scanner progress according to whether or not + // progress should be kept. + if (scannerContext.getKeepProgress()) { + // Progress should be kept. Reset to initial values seen at start of method invocation. + scannerContext.setProgress(initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + } else { + scannerContext.clearProgress(); + } + } + + private boolean nextInternal(List results, ScannerContext scannerContext) + throws IOException { + Preconditions.checkArgument(results.isEmpty(), "First parameter should be an empty list"); + Preconditions.checkArgument(scannerContext != null, "Scanner context cannot be null"); + Optional rpcCall = RpcServer.getCurrentCall(); + + // Save the initial progress from the Scanner context in these local variables. The progress + // may need to be reset a few times if rows are being filtered out so we save the initial + // progress. + int initialBatchProgress = scannerContext.getBatchProgress(); + long initialSizeProgress = scannerContext.getDataSizeProgress(); + long initialHeapSizeProgress = scannerContext.getHeapSizeProgress(); + + // Used to check time limit + LimitScope limitScope = LimitScope.BETWEEN_CELLS; + + // The loop here is used only when at some point during the next we determine + // that due to effects of filters or otherwise, we have an empty row in the result. + // Then we loop and try again. 
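resetProgress above is one of the helpers this patch extracts from the old nextInternal loop: on each pass of the row loop, progress either snaps back to the snapshot taken when next() was entered or is cleared to zero. A reduced model of the two policies (the Progress holder is illustrative):

    final class ProgressPolicy {
      static final class Progress {
        int batch;
        long dataSize;
        long heapSize;
      }

      // keepProgress: restore the snapshot taken on entry, so limits span the
      // whole call; otherwise start the row from zero, so limits apply per row.
      static void resetProgress(Progress p, boolean keepProgress,
          int initialBatch, long initialDataSize, long initialHeapSize) {
        if (keepProgress) {
          p.batch = initialBatch;
          p.dataSize = initialDataSize;
          p.heapSize = initialHeapSize;
        } else {
          p.batch = 0;
          p.dataSize = 0;
          p.heapSize = 0;
        }
      }
    }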
Otherwise, we must get out on the first iteration via return, + // "true" if there's more data to read, "false" if there isn't (storeHeap is at a stop row, + // and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow). + while (true) { + resetProgress(scannerContext, initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + checkClientDisconnect(rpcCall); + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + // Let's see what we have in the storeHeap. + Cell current = this.storeHeap.peek(); + + boolean shouldStop = shouldStop(current); + // When has filter row is true it means that the all the cells for a particular row must be + // read before a filtering decision can be made. This means that filters where hasFilterRow + // run the risk of enLongAddering out of memory errors in the case that they are applied to a + // table that has very large rows. + boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); + + // If filter#hasFilterRow is true, partial results are not allowed since allowing them + // would prevent the filters from being evaluated. Thus, if it is true, change the + // scope of any limits that could potentially create partial results to + // LimitScope.BETWEEN_ROWS so that those limits are not reached mid-row + if (hasFilterRow) { + if (LOG.isTraceEnabled()) { + LOG.trace("filter#hasFilterRow is true which prevents partial results from being " + + " formed. Changing scope of limits that may create partials"); + } + scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS); + scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS); + limitScope = LimitScope.BETWEEN_ROWS; + } + + if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { + if (hasFilterRow) { + throw new IncompatibleFilterException( + "Filter whose hasFilterRow() returns true is incompatible with scans that must " + + " stop mid-row because of a limit. ScannerContext:" + scannerContext); + } + return true; + } + + // Check if we were getting data from the joinedHeap and hit the limit. + // If not, then it's main path - getting results from storeHeap. + if (joinedContinuationRow == null) { + // First, check if we are at a stop row. If so, there are no more results. + if (shouldStop) { + if (hasFilterRow) { + filter.filterRowCells(results); + } + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // Check if rowkey filter wants to exclude this row. If so, loop to next. + // Technically, if we hit limits before on this row, we don't need this call. + if (filterRowKey(current)) { + incrementCountOfRowsFilteredMetric(scannerContext); + // early check, see HBASE-16296 + if (isFilterDoneInternal()) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + // Typically the count of rows scanned is incremented inside #populateResult. However, + // here we are filtering a row based purely on its row key, preventing us from calling + // #populateResult. 
Thus, perform the necessary increment here to rows scanned metric + incrementCountOfRowsScannedMetric(scannerContext); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + results.clear(); + + // Read nothing as the rowkey was filtered, but still need to check time limit + if (scannerContext.checkTimeLimit(limitScope)) { + return true; + } + continue; + } + + // Ok, we are good, let's try to get some results from the main heap. + populateResult(results, this.storeHeap, scannerContext, current); + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + if (hasFilterRow) { + throw new IncompatibleFilterException( + "Filter whose hasFilterRow() returns true is incompatible with scans that must " + + " stop mid-row because of a limit. ScannerContext:" + scannerContext); + } + return true; + } + + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + + Cell nextKv = this.storeHeap.peek(); + shouldStop = shouldStop(nextKv); + // save that the row was empty before filters applied to it. + final boolean isEmptyRow = results.isEmpty(); + + // We have the part of the row necessary for filtering (all of it, usually). + // First filter with the filterRow(List). + FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; + if (hasFilterRow) { + ret = filter.filterRowCellsWithRet(results); + + // We don't know how the results have changed after being filtered. Must set progress + // according to contents of results now. + if (scannerContext.getKeepProgress()) { + scannerContext.setProgress(initialBatchProgress, initialSizeProgress, + initialHeapSizeProgress); + } else { + scannerContext.clearProgress(); + } + scannerContext.incrementBatchProgress(results.size()); + for (Cell cell : results) { + scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), + cell.heapSize()); + } + } + + if (isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE || filterRow()) { + incrementCountOfRowsFilteredMetric(scannerContext); + results.clear(); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // This row was totally filtered out, if this is NOT the last row, + // we should continue on. Otherwise, nothing else to do. + if (!shouldStop) { + // Read nothing as the cells was filtered, but still need to check time limit + if (scannerContext.checkTimeLimit(limitScope)) { + return true; + } + continue; + } + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + + // Ok, we are done with storeHeap for this row. + // Now we may need to fetch additional, non-essential data into row. + // These values are not needed for filter to work, so we postpone their + // fetch to (possibly) reduce amount of data loads from disk. + if (this.joinedHeap != null) { + boolean mayHaveData = joinedHeapMayHaveData(current); + if (mayHaveData) { + joinedContinuationRow = current; + populateFromJoinedHeap(results, scannerContext); + + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + return true; + } + } + } + } else { + // Populating from the joined heap was stopped by limits, populate some more. 
+ populateFromJoinedHeap(results, scannerContext); + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + return true; + } + } + // We may have just called populateFromJoinedMap and hit the limits. If that is + // the case, we need to call it again on the next next() invocation. + if (joinedContinuationRow != null) { + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); + } + + // Finally, we are done with both joinedHeap and storeHeap. + // Double check to prevent empty rows from appearing in result. It could be + // the case when SingleColumnValueExcludeFilter is used. + if (results.isEmpty()) { + incrementCountOfRowsFilteredMetric(scannerContext); + boolean moreRows = nextRow(scannerContext, current); + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + if (!shouldStop) { + continue; + } + } + + if (shouldStop) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } else { + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); + } + } + } + + private void incrementCountOfRowsFilteredMetric(ScannerContext scannerContext) { + region.filteredReadRequestsCount.increment(); + if (region.getMetrics() != null) { + region.getMetrics().updateFilteredRecords(); + } + + if (scannerContext == null || !scannerContext.isTrackingMetrics()) { + return; + } + + scannerContext.getMetrics().countOfRowsFiltered.incrementAndGet(); + } + + private void incrementCountOfRowsScannedMetric(ScannerContext scannerContext) { + if (scannerContext == null || !scannerContext.isTrackingMetrics()) { + return; + } + + scannerContext.getMetrics().countOfRowsScanned.incrementAndGet(); + } + + /** + * @return true when the joined heap may have data for the current row + */ + private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { + Cell nextJoinedKv = joinedHeap.peek(); + boolean matchCurrentRow = + nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); + boolean matchAfterSeek = false; + + // If the next value in the joined heap does not match the current row, try to seek to the + // correct row + if (!matchCurrentRow) { + Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); + boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); + matchAfterSeek = seekSuccessful && joinedHeap.peek() != null && + CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); + } + + return matchCurrentRow || matchAfterSeek; + } + + /** + * This function is to maintain backward compatibility for 0.94 filters. HBASE-6429 combines both + * filterRow & filterRow({@code List kvs}) functions. While 0.94 code or older, it may + * not implement hasFilterRow as HBase-6429 expects because 0.94 hasFilterRow() only returns true + * when filterRow({@code List kvs}) is overridden not the filterRow(). Therefore, the + * filterRow() will be skipped. + */ + private boolean filterRow() throws IOException { + // when hasFilterRow returns true, filter.filterRow() will be called automatically inside + // filterRowCells(List kvs) so we skip that scenario here. + return filter != null && (!filter.hasFilterRow()) && filter.filterRow(); + } + + private boolean filterRowKey(Cell current) throws IOException { + return filter != null && filter.filterRowKey(current); + } + + /** + * A mocked list implementation - discards all updates. 
+ */ + private static final List MOCKED_LIST = new AbstractList() { + + @Override + public void add(int index, Cell element) { + // do nothing + } + + @Override + public boolean addAll(int index, Collection c) { + return false; // this list is never changed as a result of an update + } + + @Override + public KeyValue get(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public int size() { + return 0; + } + }; + + protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { + assert this.joinedContinuationRow == null : "Trying to go to next row during joinedHeap read."; + Cell next; + while ((next = this.storeHeap.peek()) != null && CellUtil.matchingRows(next, curRowCell)) { + // Check for thread interrupt status in case we have been signaled from + // #interruptRegionOperation. + region.checkInterrupt(); + this.storeHeap.next(MOCKED_LIST); + } + resetFilters(); + + // Calling the hook in CP which allows it to do a fast forward + return this.region.getCoprocessorHost() == null || + this.region.getCoprocessorHost().postScannerFilterRow(this, curRowCell); + } + + protected boolean shouldStop(Cell currentRowCell) { + if (currentRowCell == null) { + return true; + } + if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_END_ROW)) { + return false; + } + int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length); + return c > 0 || (c == 0 && !includeStopRow); + } + + @Override + public synchronized void close() { + if (storeHeap != null) { + storeHeap.close(); + storeHeap = null; + } + if (joinedHeap != null) { + joinedHeap.close(); + joinedHeap = null; + } + // no need to synchronize here. + scannerReadPoints.remove(this); + this.filterClosed = true; + } + + @Override + public synchronized boolean reseek(byte[] row) throws IOException { + if (row == null) { + throw new IllegalArgumentException("Row cannot be null."); + } + boolean result = false; + region.startRegionOperation(); + Cell kv = PrivateCellUtil.createFirstOnRow(row, 0, (short) row.length); + try { + // use request seek to make use of the lazy seek option. See HBASE-5520 + result = this.storeHeap.requestSeek(kv, true, true); + if (this.joinedHeap != null) { + result = this.joinedHeap.requestSeek(kv, true, true) || result; + } + } finally { + region.closeRegionOperation(); + } + return result; + } + + @Override + public void shipped() throws IOException { + if (storeHeap != null) { + storeHeap.shipped(); + } + if (joinedHeap != null) { + joinedHeap.shipped(); + } + } + + @Override + public void run() throws IOException { + // This is the RPC callback method executed. 
We do the close of the scanner in this + // callback + this.close(); + } +} \ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java index
3ca064f05101..d1995f237d2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience;
@@ -37,15 +36,9 @@ @InterfaceAudience.Private class ReversedRegionScannerImpl extends RegionScannerImpl {
- /** - * @param scan - * @param additionalScanners - * @param region - * @throws IOException - */
- ReversedRegionScannerImpl(Scan scan, List additionalScanners, HRegion region) - throws IOException {
- region.super(scan, additionalScanners, region);
+ ReversedRegionScannerImpl(Scan scan, List additionalScanners, HRegion region,
+ long nonceGroup, long nonce) throws IOException { + super(scan, additionalScanners, region, nonceGroup, nonce); }
@Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java
index c0b954a97137..c55a9f966b67 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java
@@ -131,8 +131,8 @@ public void testRecoveryAndDoubleExecutionReopen() throws Exception {
UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
HRegionServer rs = UTIL.getRSForFirstRegionInTable(tableName); HRegion region = rs.getRegions(tableName).get(0);
- region.setReadRequestsCount(1); - region.setWriteRequestsCount(2);
+ region.addReadRequestsCount(1); + region.addWriteRequestsCount(2);
long openSeqNum = region.getOpenSeqNum(); TransitRegionStateProcedure proc =
TransitRegionStateProcedure.reopen(env, region.getRegionInfo());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 58668933c61f..b56f96a51149 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -137,7 +137,6 @@ import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.Region.RowLock;
import org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem;
@@ -3768,7 +3767,7 @@ public void testGetScanner_WithNoFamilies() throws IOException { region.put(put);
Scan scan = null; - HRegion.RegionScannerImpl is = null; + RegionScannerImpl is = null; // Testing
to see how many scanners that is produced by getScanner, // starting diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index 174e2489aa71..92f790c95c13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.BloomFilterUtil; @@ -131,7 +130,7 @@ private void scanColSet(int[] colSet, int[] expectedResultCols) Scan scan = new Scan().withStartRow(ROW_BYTES).withStopRow(ROW_BYTES, true); addColumnSetToScan(scan, colSet); RegionScannerImpl scanner = region.getScanner(scan); - KeyValueHeap storeHeap = scanner.getStoreHeapForTesting(); + KeyValueHeap storeHeap = scanner.storeHeap; assertEquals(0, storeHeap.getHeap().size()); StoreScanner storeScanner = (StoreScanner) storeHeap.getCurrentForTesting(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java index 71796105af71..810f0f68256b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; @@ -525,7 +524,7 @@ protected RegionScannerImpl instantiateRegionScanner(Scan scan, private static class HeartbeatReversedRegionScanner extends ReversedRegionScannerImpl { HeartbeatReversedRegionScanner(Scan scan, List additionalScanners, HRegion region) throws IOException { - super(scan, additionalScanners, region); + super(scan, additionalScanners, region, HConstants.NO_NONCE, HConstants.NO_NONCE); } @Override @@ -554,7 +553,7 @@ protected void initializeKVHeap(List scanners, private static class HeartbeatRegionScanner extends RegionScannerImpl { HeartbeatRegionScanner(Scan scan, List additionalScanners, HRegion region) throws IOException { - region.super(scan, additionalScanners, region); + super(scan, additionalScanners, region, HConstants.NO_NONCE, HConstants.NO_NONCE); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java index 11949153d3e2..61a0689bc4af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java @@ -36,7 +36,6 @@ import 
org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -102,7 +101,7 @@ public void tearDown() throws IOException { public void test() throws IOException { try (RegionScannerImpl scanner = REGION.getScanner(new Scan())) { StoreScanner storeScanner = - (StoreScanner) (scanner).getStoreHeapForTesting().getCurrentForTesting(); + (StoreScanner) scanner.storeHeap.getCurrentForTesting(); for (KeyValueScanner kvs : storeScanner.getAllScannersForTesting()) { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; @@ -151,8 +150,7 @@ public boolean filterRowKey(Cell cell) throws IOException { private void testFilter(Filter filter) throws IOException { try (RegionScannerImpl scanner = REGION.getScanner(new Scan().setFilter(filter))) { - StoreScanner storeScanner = - (StoreScanner) (scanner).getStoreHeapForTesting().getCurrentForTesting(); + StoreScanner storeScanner = (StoreScanner) scanner.storeHeap.getCurrentForTesting(); for (KeyValueScanner kvs : storeScanner.getAllScannersForTesting()) { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index fca371f12caa..ea32a3330226 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -157,7 +157,7 @@ public void testWideScanBatching() throws IOException { // trigger ChangedReadersObservers Iterator scanners = - ((HRegion.RegionScannerImpl) s).storeHeap.getHeap().iterator(); + ((RegionScannerImpl) s).storeHeap.getHeap().iterator(); while (scanners.hasNext()) { StoreScanner ss = (StoreScanner) scanners.next(); ss.updateReaders(Collections.emptyList(), Collections.emptyList()); From 8e16347cb954abfed8a180ea684c4a14fcdf139f Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Fri, 8 Jan 2021 18:58:30 +0800 Subject: [PATCH 628/769] HBASE-25434 SlowDelete & SlowPut metric value should use updateDelete & updatePut (#2837) Signed-off-by: Zheng Wang --- .../hbase/regionserver/MetricsRegionServer.java | 12 ++++++------ .../hbase/regionserver/TestMetricsRegionServer.java | 10 ++++++---- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 3bd787d10074..86b97a2afb9e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -113,9 +113,6 @@ public void updatePutBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updatePutBatch(tn, t); } - if (t > slowMetricTime) { - serverSource.incrSlowPut(); - } serverSource.updatePutBatch(t); } @@ -123,6 +120,9 @@ public void updatePut(TableName tn, long t) { if (tableMetrics != null && tn 
!= null) { tableMetrics.updatePut(tn, t); } + if (t > slowMetricTime) { + serverSource.incrSlowPut(); + } serverSource.updatePut(t); userAggregate.updatePut(t); } @@ -131,6 +131,9 @@ public void updateDelete(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateDelete(tn, t); } + if (t > slowMetricTime) { + serverSource.incrSlowDelete(); + } serverSource.updateDelete(t); userAggregate.updateDelete(t); } @@ -139,9 +142,6 @@ public void updateDeleteBatch(TableName tn, long t) { if (tableMetrics != null && tn != null) { tableMetrics.updateDeleteBatch(tn, t); } - if (t > slowMetricTime) { - serverSource.incrSlowDelete(); - } serverSource.updateDeleteBatch(t); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java index e56eb0f20aaa..777ba5e8feb2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -151,6 +151,8 @@ public void testSlowCount() { for (int i=0; i < 17; i ++) { rsm.updatePut(null, 17); rsm.updateDelete(null, 17); + rsm.updatePut(null, 1006); + rsm.updateDelete(null, 1003); rsm.updateCheckAndDelete(null, 17); rsm.updateCheckAndPut(null, 17); rsm.updateCheckAndMutate(null, 17); @@ -161,17 +163,17 @@ public void testSlowCount() { HELPER.assertCounter("getNumOps", 28, serverSource); HELPER.assertCounter("incrementNumOps", 30, serverSource); HELPER.assertCounter("putBatchNumOps", 32, serverSource); - HELPER.assertCounter("putNumOps", 17, serverSource); - HELPER.assertCounter("deleteNumOps", 17, serverSource); + HELPER.assertCounter("putNumOps", 34, serverSource); + HELPER.assertCounter("deleteNumOps", 34, serverSource); HELPER.assertCounter("checkAndDeleteNumOps", 17, serverSource); HELPER.assertCounter("checkAndPutNumOps", 17, serverSource); HELPER.assertCounter("checkAndMutateNumOps", 17, serverSource); HELPER.assertCounter("slowAppendCount", 12, serverSource); - HELPER.assertCounter("slowDeleteCount", 13, serverSource); + HELPER.assertCounter("slowDeleteCount", 17, serverSource); HELPER.assertCounter("slowGetCount", 14, serverSource); HELPER.assertCounter("slowIncrementCount", 15, serverSource); - HELPER.assertCounter("slowPutCount", 16, serverSource); + HELPER.assertCounter("slowPutCount", 17, serverSource); } @Test From 1b7e14c1d998f07ced0da4f0f5718ba650f86394 Mon Sep 17 00:00:00 2001 From: Bo Cui Date: Fri, 8 Jan 2021 21:57:23 +0800 Subject: [PATCH 629/769] HBASE-25483 set the loadMeta log level to debug (#2859) Signed-off-by: Pankaj Kumar --- .../hadoop/hbase/master/assignment/RegionStateStore.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 4da9493386ae..5036711507f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -164,8 +164,7 @@ private void visitMetaEntry(final RegionStateVisitor visitor, final Result resul ServerName regionLocation = MetaTableAccessor.getTargetServerName(result, replicaId); final long openSeqNum = hrl.getSeqNum(); - // TODO: move under trace, now is visible for 
debugging - LOG.info( + LOG.debug( "Load hbase:meta entry region={}, regionState={}, lastHost={}, " + "regionLocation={}, openSeqNum={}", regionInfo.getEncodedName(), state, lastHost, regionLocation, openSeqNum); From d6796bb523407513cd6e8664bc83df5f54824f6d Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Sat, 9 Jan 2021 00:00:15 +0800 Subject: [PATCH 630/769] HBASE-25459 WAL can't be cleaned in some scenes (#2848) Signed-off-by: Wellington Chevreuil --- .../regionserver/wal/SequenceIdAccounting.java | 6 +++++- .../wal/TestSequenceIdAccounting.java | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 5c6fcd2d1aa6..6be95391819b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -250,7 +250,11 @@ ConcurrentMap getOrCreateLowestSequenceIds(byte[] enco */ private static long getLowestSequenceId(Map sequenceids) { long lowest = HConstants.NO_SEQNUM; - for (Long sid: sequenceids.values()) { + for (Map.Entry entry : sequenceids.entrySet()){ + if (entry.getKey().toString().equals("METAFAMILY")){ + continue; + } + Long sid = entry.getValue(); if (lowest == HConstants.NO_SEQNUM || sid.longValue() < lowest) { lowest = sid.longValue(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java index 098dc86461b6..8eb99b3a4772 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java @@ -44,10 +44,14 @@ public class TestSequenceIdAccounting { private static final byte [] ENCODED_REGION_NAME = Bytes.toBytes("r"); private static final byte [] FAMILY_NAME = Bytes.toBytes("cf"); + private static final byte [] META_FAMILY = Bytes.toBytes("METAFAMILY"); private static final Set FAMILIES; + private static final Set META_FAMILY_SET; static { FAMILIES = new HashSet<>(); FAMILIES.add(FAMILY_NAME); + META_FAMILY_SET = new HashSet<>(); + META_FAMILY_SET.add(META_FAMILY); } @Test @@ -123,6 +127,20 @@ public void testAreAllLower() { sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); assertTrue(sida.areAllLower(m, null)); + m.put(ENCODED_REGION_NAME, sequenceid); + assertFalse(sida.areAllLower(m, null)); + + // Test the METAFAMILY is filtered in SequenceIdAccounting.lowestUnflushedSequenceIds + SequenceIdAccounting meta_sida = new SequenceIdAccounting(); + Map meta_m = new HashMap<>(); + meta_sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); + meta_m.put(ENCODED_REGION_NAME, sequenceid); + meta_sida.update(ENCODED_REGION_NAME, META_FAMILY_SET, ++sequenceid, true); + meta_sida.update(ENCODED_REGION_NAME, META_FAMILY_SET, ++sequenceid, true); + meta_sida.update(ENCODED_REGION_NAME, META_FAMILY_SET, ++sequenceid, true); + assertTrue(meta_sida.areAllLower(meta_m, null)); + meta_m.put(ENCODED_REGION_NAME, sequenceid); + assertTrue(meta_sida.areAllLower(meta_m, null)); } @Test From c8a10655b1cfd8406f169043e3ec1e574441c5dd Mon Sep 17 
00:00:00 2001 From: Michael Stack Date: Fri, 8 Jan 2021 12:38:45 -0800 Subject: [PATCH 631/769] HBASE-25487
[create-release] changes.md update broken (#2864) Signed-off-by: Huaxiang Sun ---
dev-support/create-release/release-util.sh | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh
index 5f7224fded6b..48e240f3402c 100755 --- a/dev-support/create-release/release-util.sh
+++ b/dev-support/create-release/release-util.sh @@ -16,6 +16,17 @@
# See the License for the specific language governing permissions and # limitations under the License. # +
+# Source this file if you want to use any of its utility functions (also useful
+# for testing the functions below). Do "$ . ./release-util.sh" and then
+# you can do stuff like call the CHANGES updating function
+# update_releasenotes:
+#
+# $ update_releasenotes ~/checkouts/hbase.apache.git 2.3.4
+#
+# Just make sure any environment variables needed are predefined
+# in your context.
+#
DRY_RUN=${DRY_RUN:-1} #default to dry run DEBUG=${DEBUG:-0} GPG=${GPG:-gpg} @@ -26,8 +37,6 @@ fi
# Maven Profiles for publishing snapshots and release to Maven Central and Dist
PUBLISH_PROFILES=("-P" "apache-release,release") -set -e - function error { log "Error: $*" >&2 exit 1
@@ -478,6 +487,7 @@ function generate_api_report { } # Look up the Jira name associated with project.
+# Returns result on stdout.
# Currently all the 'hbase-*' projects share the same HBASE jira name. This works because,
# by convention, the HBASE jira "Fix Version" field values have the sub-project name pre-pended,
# as in "hbase-operator-tools-1.0.0". @@ -492,7 +502,7 @@ function get_jira_name {
if [[ -z "$jira_name" ]]; then error "Sorry, can't determine the Jira name for project $project" fi
- log "$jira_name" + echo "$jira_name" } # Update the CHANGES.md
From 5424f9547012a01a7e9e41b0756c126be9470dad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=94=B3=E8=83=9C=E5=88=A9?= <48829688+shenshengli@users.noreply.github.com>
Date: Fri, 8 Jan 2021 14:10:30 -0500 Subject: [PATCH 632/769] HBASE-25449 'dfs.client.read.shortcircuit' should
not be set in hbase-default.xml Signed-off-by: Josh Elser --- .../src/main/resources/hbase-default.xml | 4 +-
.../hadoop/hbase/TestHBaseConfiguration.java | 17 ++++++++ .../src/test/resources/hdfs-default.xml | 42 +++++++++++++++++++
.../src/test/resources/hdfs-scr-enabled.xml | 42 +++++++++++++++++++ 4 files changed, 103 insertions(+), 2 deletions(-)
create mode 100644 hbase-common/src/test/resources/hdfs-default.xml
create mode 100644 hbase-common/src/test/resources/hdfs-scr-enabled.xml
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 9092dd147198..20f3881edb2c 100644 --- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1461,7 +1461,7 @@
possible configurations would overwhelm and obscure the important. dfs.client.read.shortcircuit - false +
If set to true, this configuration parameter enables short-circuit local reads.
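<!--
  With the shipped default gone, a deployment that wants short-circuit reads now
  sets both keys itself, e.g. in hbase-site.xml; the values below mirror the
  hdfs-scr-enabled.xml test resource added by this patch:
    dfs.client.read.shortcircuit = true
    dfs.domain.socket.path = /var/lib/hadoop-hdfs/dn_socket
-->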
dfs.domain.socket.path - none + This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients, if diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 6a0b4283ed03..1144f1daf351 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -115,6 +115,23 @@ public void testSecurityConfCaseInsensitive() { conf.set("hbase.security.authentication", "KERBeros"); Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); } + + @Test + public void testGetConfigOfShortcircuitRead() throws Exception { + Configuration conf = HBaseConfiguration.create(); + Configuration.addDefaultResource("hdfs-default.xml"); + assertEquals("hdfs-default.xml", + conf.getPropertySources("dfs.client.read.shortcircuit")[0]); + assertEquals("false", conf.get("dfs.client.read.shortcircuit")); + assertNull(conf.get("dfs.domain.socket.path")); + Configuration.addDefaultResource("hdfs-scr-enabled.xml"); + assertEquals("hdfs-scr-enabled.xml", + conf.getPropertySources("dfs.client.read.shortcircuit")[0]); + assertEquals("hdfs-scr-enabled.xml", + conf.getPropertySources("dfs.domain.socket.path")[0]); + assertEquals("true", conf.get("dfs.client.read.shortcircuit")); + assertEquals("/var/lib/hadoop-hdfs/dn_socket", conf.get("dfs.domain.socket.path")); + } private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = diff --git a/hbase-common/src/test/resources/hdfs-default.xml b/hbase-common/src/test/resources/hdfs-default.xml new file mode 100644 index 000000000000..fdb3c36edc87 --- /dev/null +++ b/hbase-common/src/test/resources/hdfs-default.xml @@ -0,0 +1,42 @@ + + + + + + + dfs.client.read.shortcircuit + false + + If set to true, this configuration parameter enables short-circuit local + reads. + + + + dfs.domain.socket.path + + + Optional. This is a path to a UNIX domain socket that will be used for + communication between the DataNode and local HDFS clients. + If the string "_PORT" is present in this path, it will be replaced by the + TCP port of the DataNode. + + + diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml new file mode 100644 index 000000000000..8594494782c5 --- /dev/null +++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml @@ -0,0 +1,42 @@ + + + + + + + dfs.client.read.shortcircuit + true + + If set to true, this configuration parameter enables short-circuit local + reads. + + + + dfs.domain.socket.path + /var/lib/hadoop-hdfs/dn_socket + + Optional. This is a path to a UNIX domain socket that will be used for + communication between the DataNode and local HDFS clients. + If the string "_PORT" is present in this path, it will be replaced by the + TCP port of the DataNode. + + + From a9a2ae5d43e5bada6ff0620e20240968efd0b200 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 8 Jan 2021 14:43:56 -0800 Subject: [PATCH 633/769] HBASE-25489 improve performance of --parse-release-tags (#2867) Profiler shows a lot of time spent in the UPDATE SQL statement. Remove the tight loop and let SQL do a bulk-update instead. 
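In sketch form (a standalone illustration using the stdlib sqlite3 module; the
database path, branch, tag, and sha values here are made up, while the table and
column names match the script), the change swaps N per-row statements for one
parameterized bulk UPDATE:

  import sqlite3

  conn = sqlite3.connect('audit.db')
  shas = ['deadbeef', 'cafebabe']
  # one '?' placeholder per sha keeps the values bound rather than interpolated
  placeholders = ','.join('?' for _ in shas)
  conn.execute(
      'UPDATE git_commits SET git_tag = ?'
      f' WHERE branch = ? AND git_sha IN ({placeholders})',
      ['rel/2.4.1', 'branch-2.4'] + shas)
  conn.commit()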
Signed-off-by: Huaxiang Sun Signed-off-by: Michael Stack --- .../git_jira_release_audit.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/dev-support/git-jira-release-audit/git_jira_release_audit.py b/dev-support/git-jira-release-audit/git_jira_release_audit.py index 358dfd533502..f8066c44e8f5 100644 --- a/dev-support/git-jira-release-audit/git_jira_release_audit.py +++ b/dev-support/git-jira-release-audit/git_jira_release_audit.py @@ -122,16 +122,21 @@ def flush_commits(self): """Commit any pending changes to the database.""" self.conn.commit() - def apply_git_tag(self, branch, git_sha, git_tag): + def apply_git_tag(self, branch, git_tag, git_shas): """Annotate a commit in the commits database as being a part of the specified release. Args: branch (str): The name of the git branch from which the commit originates. - git_sha (str): The commit's SHA. git_tag (str): The first release tag following the commit. + git_shas: The commits' SHAs. """ - self.conn.execute("UPDATE git_commits SET git_tag = ? WHERE branch = ? AND git_sha = ?", - (git_tag, branch, git_sha)) + self.conn.execute( + ( + f"UPDATE git_commits SET git_tag = ?" + f" WHERE branch = ?" + f" AND git_sha in ({','.join('?' for _ in git_shas)})" + ), + [git_tag, branch] + git_shas) def apply_fix_version(self, jira_id, fix_version): """Annotate a Jira issue in the jira database as being part of the specified release @@ -327,12 +332,7 @@ def _extract_release_tag(self, commit): return None def _set_release_tag(self, branch, tag, shas): - cnt = 0 - for sha in shas: - self._db.apply_git_tag(branch, sha, tag) - cnt += 1 - if cnt % 50 == 0: - self._db.flush_commits() + self._db.apply_git_tag(branch, tag, shas) self._db.flush_commits() def _resolve_ambiguity(self, commit): From 4bbcf9ad76c0d488e0bc2a59fbd1a5dc1c1fd814 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Fri, 8 Jan 2021 14:45:45 -0800 Subject: [PATCH 634/769] HBASE-25473 [create-release] checkcompatibility.py failing with "KeyError: 'binary'" (#2862) Exclude hbase-shaded-testing-util*.jar from checkcompatibility; this jar can not be unzipped on a case-insensitive filesystem. Added some means of debug into the checkcompatibility to help when cryptic failures. Signed-off-by: Nick Dimiduk --- dev-support/checkcompatibility.py | 10 +++++++++- dev-support/create-release/release-util.sh | 10 ++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/dev-support/checkcompatibility.py b/dev-support/checkcompatibility.py index b764aaaec17d..d39599aa3ea1 100755 --- a/dev-support/checkcompatibility.py +++ b/dev-support/checkcompatibility.py @@ -229,7 +229,7 @@ def compare_results(tool_results, known_issues, compare_warnings): observed_count=tool_results[check][issue_type]) for check, known_issue_counts in known_issues.items() for issue_type, known_count in known_issue_counts.items() - if tool_results[check][issue_type] > known_count] + if compare_tool_results_count(tool_results, check, issue_type, known_count)] if not compare_warnings: unexpected_issues = [tup for tup in unexpected_issues @@ -241,6 +241,14 @@ def compare_results(tool_results, known_issues, compare_warnings): return bool(unexpected_issues) +def compare_tool_results_count(tool_results, check, issue_type, known_count): + """ Check problem counts are no more than the known count. 
+ (This function exists just so we can add logging; previously it was an
+ inlined one-liner, which made it hard to debug.)
+ """
+ # logging.info("known_count=%s, check key=%s, tool_results=%s, issue_type=%s",
+ # str(known_count), str(check), str(tool_results), str(issue_type))
+ return tool_results[check][issue_type] > known_count
def process_java_acc_output(output): """ Process the output string to find the problems and warnings in both the
diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh
index 48e240f3402c..d907253dffe6 100755 --- a/dev-support/create-release/release-util.sh
+++ b/dev-support/create-release/release-util.sh @@ -478,8 +478,18 @@ function generate_api_report {
local timing_token timing_token="$(start_step)" # Generate api report.
+ # Filter out some jar types. Filters are tricky. Python regex on
+ # file basename. Exclude the saved-aside original jars... they are
+ # not included in resulting artifact. Also, do not include the
+ # hbase-shaded-testing-util.* jars. This jar is unzip'able on mac
+ # os x as is because it has a META_INF/LICENSE file and then a
+ # META_INF/license directory for the included jar's licenses;
+ # it fails to unjar on mac os x, which this tool does when making its checks
+ # (Its exclusion should be fine; it is just an aggregate of other jars).
"${project}"/dev-support/checkcompatibility.py --annotation \ org.apache.yetus.audience.InterfaceAudience.Public \
+ -e "original-hbase.*.jar" \ + -e "hbase-shaded-testing-util.*.jar" \ "$previous_tag" "$release_tag"
previous_version="$(echo "${previous_tag}" | sed -e 's/rel\///')"
cp "${project}/target/compat-check/report.html" "./api_compare_${previous_version}_to_${release_tag}.html"
From c4a9d729da18a8e31b0159200cd5641a6d5885ca Mon Sep 17 00:00:00 2001 From: Duo Zhang
Date: Sat, 9 Jan 2021 08:50:11 +0800 Subject: [PATCH 635/769] HBASE-25333 Add maven enforcer rule to ban
VisibleForTesting imports (#2854) Signed-off-by: Peter Somogyi --- pom.xml | 13 +++++++++++++
1 file changed, 13 insertions(+) diff --git a/pom.xml b/pom.xml index 749ab83842bb..e4505d6afbcf 100755
--- a/pom.xml +++ b/pom.xml @@ -1341,6 +1341,19 @@ org.glassfish.jersey.**
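<!--
  The addition below is a restrict-imports enforcer rule (the bare 'true' and '512'
  are likely its includeTestCode and commentLineBufferSize settings): any import of
  the banned VisibleForTesting class, in main or test code, fails the build with
  the message that follows.
-->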
Co-authored-by: Abhishek Khanna Signed-off-by: stack Signed-off-by: Zach York --- .../hbase/mapreduce/HFileOutputFormat2.java | 5 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 6 +- .../hadoop/hbase/regionserver/HMobStore.java | 32 ++- .../hadoop/hbase/regionserver/HStore.java | 258 +++++++++--------- .../hbase/regionserver/StoreContext.java | 194 +++++++++++++ .../hadoop/hbase/regionserver/StoreUtils.java | 25 ++ .../hadoop/hbase/tool/BulkLoadHFilesTool.java | 6 +- .../wal/BoundedRecoveredHFilesOutputSink.java | 6 +- .../apache/hadoop/hbase/io/TestHeapSize.java | 3 +- .../hbase/regionserver/TestHRegion.java | 2 +- .../TestSecureBulkLoadManager.java | 4 +- 11 files changed, 376 insertions(+), 165 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 75b5246d2c88..ee6d5331f3f6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -369,8 +370,8 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding; encoding = encoding == null ? 
DataBlockEncoding.NONE : encoding; HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression) - .withDataBlockEncoding(encoding).withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) .withColumnFamily(family).withTableName(tableName); if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index acc8f74a501b..2ae29385eb42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -56,9 +56,9 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -540,8 +540,8 @@ public static StoreFileWriter createWriter(Configuration conf, FileSystem fs, Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext, boolean isCompaction) throws IOException { return createWriter(conf, fs, family, new Path(basePath, mobFileName.getFileName()), - maxKeyCount, compression, cacheConfig, cryptoContext, HStore.getChecksumType(conf), - HStore.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction); + maxKeyCount, compression, cacheConfig, cryptoContext, StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 5960b8030900..7ce7f0310c7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -93,7 +93,6 @@ public class HMobStore extends HStore { private AtomicLong mobFlushedCellsSize = new AtomicLong(); private AtomicLong mobScanCellsCount = new AtomicLong(); private AtomicLong mobScanCellsSize = new AtomicLong(); - private ColumnFamilyDescriptor family; private Map> map = new ConcurrentHashMap<>(); private final IdLock keyLock = new IdLock(); // When we add a MOB reference cell to the HFile, we will add 2 tags along with it @@ -107,16 +106,15 @@ public class HMobStore extends HStore { public HMobStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { super(region, family, confParam, warmup); - this.family = family; this.mobFileCache = region.getMobFileCache(); this.homePath = MobUtils.getMobHome(conf); this.mobFamilyPath = MobUtils.getMobFamilyPath(conf, this.getTableName(), - family.getNameAsString()); + getColumnFamilyName()); List locations = new ArrayList<>(2); 
locations.add(mobFamilyPath); TableName tn = region.getTableDescriptor().getTableName(); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn) - .getEncodedName(), family.getNameAsString())); + .getEncodedName(), getColumnFamilyName())); map.put(tn, locations); List tags = new ArrayList<>(2); tags.add(MobConstants.MOB_REF_TAG); @@ -209,7 +207,7 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey Compression.Algorithm compression, byte[] startKey, boolean isCompaction) throws IOException { MobFileName mobFileName = MobFileName.create(startKey, date, UUID.randomUUID() - .toString().replaceAll("-", ""), region.getRegionInfo().getEncodedName()); + .toString().replaceAll("-", ""), getHRegion().getRegionInfo().getEncodedName()); return createWriterInTmp(mobFileName, basePath, maxKeyCount, compression, isCompaction); } @@ -226,9 +224,11 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath, long maxKeyCount, Compression.Algorithm compression, boolean isCompaction) throws IOException { - return MobUtils.createWriter(conf, region.getFilesystem(), family, - new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, cacheConf, - cryptoContext, checksumType, bytesPerChecksum, blocksize, BloomType.NONE, isCompaction); + return MobUtils.createWriter(conf, getFileSystem(), getColumnFamilyDescriptor(), + new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, getCacheConfig(), + getStoreContext().getEncryptionContext(), StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), getStoreContext().getBlockSize(), BloomType.NONE, + isCompaction); } /** @@ -245,10 +245,10 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio validateMobFile(sourceFile); LOG.info(" FLUSH Renaming flushed file from {} to {}", sourceFile, dstPath); Path parent = dstPath.getParent(); - if (!region.getFilesystem().exists(parent)) { - region.getFilesystem().mkdirs(parent); + if (!getFileSystem().exists(parent)) { + getFileSystem().mkdirs(parent); } - if (!region.getFilesystem().rename(sourceFile, dstPath)) { + if (!getFileSystem().rename(sourceFile, dstPath)) { throw new IOException("Failed rename of " + sourceFile + " to " + dstPath); } } @@ -261,7 +261,7 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio private void validateMobFile(Path path) throws IOException { HStoreFile storeFile = null; try { - storeFile = new HStoreFile(region.getFilesystem(), path, conf, this.cacheConf, + storeFile = new HStoreFile(getFileSystem(), path, conf, getCacheConfig(), BloomType.NONE, isPrimaryReplicaStore()); storeFile.initReader(); } catch (IOException e) { @@ -352,9 +352,11 @@ public List getLocations(TableName tableName) throws IOException { locations = map.get(tableName); if (locations == null) { locations = new ArrayList<>(2); - locations.add(MobUtils.getMobFamilyPath(conf, tableName, family.getNameAsString())); + locations.add(MobUtils.getMobFamilyPath(conf, tableName, getColumnFamilyDescriptor() + .getNameAsString())); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tableName, - MobUtils.getMobRegionInfo(tableName).getEncodedName(), family.getNameAsString())); + MobUtils.getMobRegionInfo(tableName).getEncodedName(), getColumnFamilyDescriptor() + .getNameAsString())); map.put(tableName, locations); } } finally { @@ -388,7 +390,7 @@ private MobCell 
readCell(List locations, String fileName, Cell search, MobFile file = null; Path path = new Path(location, fileName); try { - file = mobFileCache.openFile(fs, path, cacheConf); + file = mobFileCache.openFile(fs, path, getCacheConfig()); return readPt != -1 ? file.readCell(search, cacheMobBlocks, readPt) : file.readCell(search, cacheMobBlocks); } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 3a71c230bebe..99880efece73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -97,7 +97,6 @@ import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -157,11 +156,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected final MemStore memstore; // This stores directory in the filesystem. - protected final HRegion region; - private final ColumnFamilyDescriptor family; - private final HRegionFileSystem fs; + private final HRegion region; protected Configuration conf; - protected CacheConfig cacheConf; private long lastCompactSize = 0; volatile boolean forceMajor = false; private AtomicLong storeSize = new AtomicLong(); @@ -215,16 +211,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private final Set changedReaderObservers = Collections.newSetFromMap(new ConcurrentHashMap()); - protected final int blocksize; private HFileDataBlockEncoder dataBlockEncoder; - /** Checksum configuration */ - protected ChecksumType checksumType; - protected int bytesPerChecksum; - - // Comparing KeyValues - protected final CellComparator comparator; - final StoreEngine storeEngine; private static final AtomicBoolean offPeakCompactionTracker = new AtomicBoolean(); @@ -236,7 +224,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private long blockingFileCount; private int compactionCheckMultiplier; - protected Encryption.Context cryptoContext = Encryption.Context.NONE; private AtomicLong flushedCellsCount = new AtomicLong(); private AtomicLong compactedCellsCount = new AtomicLong(); @@ -246,6 +233,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private AtomicLong compactedCellsSize = new AtomicLong(); private AtomicLong majorCompactedCellsSize = new AtomicLong(); + private final StoreContext storeContext; + /** * Constructor * @param family HColumnDescriptor for this column @@ -254,12 +243,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected HStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { - this.fs = region.getRegionFileSystem(); - - // Assemble the store's home directory and Ensure it exists. - fs.createStoreDir(family.getNameAsString()); - this.region = region; - this.family = family; // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor // CompoundConfiguration will look for keys in reverse order of addition, so we'd // add global config first, then table and cf overrides, then cf metadata. 
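// A minimal sketch of the pattern this patch introduces (builder methods and
// getters as in the surrounding diff; the usage site is hypothetical):
//
//   StoreContext ctx = new StoreContext.Builder()
//       .withBlockSize(family.getBlocksize())
//       .withCacheConfig(cacheConf)
//       .withRegionFileSystem(regionFs)
//       .build();
//
//   // consumers read store metadata through ctx, e.g. ctx.getRegionFileSystem(),
//   // instead of being handed the whole HStore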
@@ -268,18 +251,22 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, .addBytesMap(region.getTableDescriptor().getValues()) .addStringMap(family.getConfiguration()) .addBytesMap(family.getValues()); - this.blocksize = family.getBlocksize(); + + this.region = region; + this.storeContext = initializeStoreContext(family); + + // Assemble the store's home directory and Ensure it exists. + region.getRegionFileSystem().createStoreDir(family.getNameAsString()); // set block storage policy for store directory String policyName = family.getStoragePolicy(); if (null == policyName) { policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY); } - this.fs.setStoragePolicy(family.getNameAsString(), policyName.trim()); + region.getRegionFileSystem().setStoragePolicy(family.getNameAsString(), policyName.trim()); this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding()); - this.comparator = region.getCellComparator(); // used by ScanQueryMatcher long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); @@ -288,14 +275,11 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, long ttl = determineTTLFromFamily(family); // Why not just pass a HColumnDescriptor in here altogether? Even if have // to clone it? - scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); + scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, region.getCellComparator()); this.memstore = getMemstore(); this.offPeakHours = OffPeakHours.getInstance(conf); - // Setting up cache configuration for this family - createCacheConf(family); - this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false); this.blockingFileCount = @@ -308,7 +292,7 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER; } - this.storeEngine = createStoreEngine(this, this.conf, this.comparator); + this.storeEngine = createStoreEngine(this, this.conf, region.getCellComparator()); List hStoreFiles = loadStoreFiles(warmup); // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and @@ -318,10 +302,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.totalUncompressedBytes.addAndGet(getTotalUncompressedBytes(hStoreFiles)); this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles); - // Initialize checksum type from name. The names are CRC32, CRC32C, etc. 
- this.checksumType = getChecksumType(conf); - // Initialize bytes per checksum - this.bytesPerChecksum = getBytesPerChecksum(conf); flushRetriesNumber = conf.getInt( "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER); pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE); @@ -330,7 +310,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, "hbase.hstore.flush.retries.number must be > 0, not " + flushRetriesNumber); } - cryptoContext = EncryptionUtil.createEncryptionContext(conf, family); int confPrintThreshold = this.conf.getInt("hbase.region.store.parallel.put.print.threshold", 50); @@ -347,6 +326,32 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, cacheOnWriteLogged = false; } + private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException { + return new StoreContext.Builder() + .withBlockSize(family.getBlocksize()) + .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family)) + .withBloomType(family.getBloomFilterType()) + .withCacheConfig(createCacheConf(family)) + .withCellComparator(region.getCellComparator()) + .withColumnFamilyDescriptor(family) + .withCompactedFilesSupplier(this::getCompactedFiles) + .withRegionFileSystem(region.getRegionFileSystem()) + .withFavoredNodesSupplier(this::getFavoredNodes) + .withFamilyStoreDirectoryPath(region.getRegionFileSystem() + .getStoreDir(family.getNameAsString())) + .withRegionCoprocessorHost(region.getCoprocessorHost()) + .build(); + } + + private InetSocketAddress[] getFavoredNodes() { + InetSocketAddress[] favoredNodes = null; + if (region.getRegionServerServices() != null) { + favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion( + region.getRegionInfo().getEncodedName()); + } + return favoredNodes; + } + /** * @return MemStore Instance to use in this store. */ @@ -358,7 +363,7 @@ private MemStore getMemstore() { inMemoryCompaction = MemoryCompactionPolicy.valueOf( conf.get("hbase.systemtables.compacting.memstore.type", "NONE")); } else { - inMemoryCompaction = family.getInMemoryCompaction(); + inMemoryCompaction = getColumnFamilyDescriptor().getInMemoryCompaction(); } if (inMemoryCompaction == null) { inMemoryCompaction = @@ -368,13 +373,13 @@ private MemStore getMemstore() { switch (inMemoryCompaction) { case NONE: ms = ReflectionUtils.newInstance(DefaultMemStore.class, - new Object[] { conf, this.comparator, + new Object[] { conf, getComparator(), this.getHRegion().getRegionServicesForStores()}); break; default: Class clz = conf.getClass(MEMSTORE_CLASS_NAME, CompactingMemStore.class, CompactingMemStore.class); - ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this, + ms = ReflectionUtils.newInstance(clz, new Object[]{conf, getComparator(), this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction}); } return ms; @@ -384,10 +389,12 @@ private MemStore getMemstore() { * Creates the cache config. * @param family The current column family. 
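* @return The cache config to use for this family.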
*/ - protected void createCacheConf(final ColumnFamilyDescriptor family) { - this.cacheConf = new CacheConfig(conf, family, region.getBlockCache(), + protected CacheConfig createCacheConf(final ColumnFamilyDescriptor family) { + CacheConfig cacheConf = new CacheConfig(conf, family, region.getBlockCache(), region.getRegionServicesForStores().getByteBuffAllocator()); - LOG.info("Created cacheConfig: " + this.getCacheConfig() + " for " + this); + LOG.info("Created cacheConfig: {}, for column family {} of region {} ", cacheConf, + family.getNameAsString(), region.getRegionInfo().getEncodedName()); + return cacheConf; } /** @@ -400,7 +407,7 @@ protected void createCacheConf(final ColumnFamilyDescriptor family) { */ protected StoreEngine createStoreEngine(HStore store, Configuration conf, CellComparator kvComparator) throws IOException { - return StoreEngine.create(store, conf, comparator); + return StoreEngine.create(store, conf, kvComparator); } /** @@ -421,9 +428,13 @@ public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { return ttl; } + StoreContext getStoreContext() { + return storeContext; + } + @Override public String getColumnFamilyName() { - return this.family.getNameAsString(); + return this.storeContext.getFamily().getNameAsString(); } @Override @@ -433,11 +444,11 @@ public TableName getTableName() { @Override public FileSystem getFileSystem() { - return this.fs.getFileSystem(); + return storeContext.getRegionFileSystem().getFileSystem(); } public HRegionFileSystem getRegionFileSystem() { - return this.fs; + return storeContext.getRegionFileSystem(); } /* Implementation of StoreConfigInformation */ @@ -474,33 +485,10 @@ public long getBlockingFileCount() { } /* End implementation of StoreConfigInformation */ - /** - * Returns the configured bytesPerChecksum value. - * @param conf The configuration - * @return The bytesPerChecksum that is set in the configuration - */ - public static int getBytesPerChecksum(Configuration conf) { - return conf.getInt(HConstants.BYTES_PER_CHECKSUM, - HFile.DEFAULT_BYTES_PER_CHECKSUM); - } - - /** - * Returns the configured checksum algorithm. - * @param conf The configuration - * @return The checksum algorithm that is set in the configuration - */ - public static ChecksumType getChecksumType(Configuration conf) { - String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME); - if (checksumName == null) { - return ChecksumType.getDefaultChecksumType(); - } else { - return ChecksumType.nameToType(checksumName); - } - } @Override public ColumnFamilyDescriptor getColumnFamilyDescriptor() { - return this.family; + return this.storeContext.getFamily(); } @Override @@ -559,7 +547,7 @@ void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) { * from the given directory. */ private List loadStoreFiles(boolean warmup) throws IOException { - Collection files = fs.getStoreFiles(getColumnFamilyName()); + Collection files = getRegionFileSystem().getStoreFiles(getColumnFamilyName()); return openStoreFiles(files, warmup); } @@ -610,7 +598,7 @@ private List openStoreFiles(Collection files, boolean if (ioe != null) { // close StoreFile readers boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? 
getCacheConfig().shouldEvictOnClose(): true; for (HStoreFile file : results) { try { if (file != null) { @@ -638,7 +626,8 @@ private List openStoreFiles(Collection files, boolean results.removeAll(filesToRemove); if (!filesToRemove.isEmpty() && this.isPrimaryReplicaStore()) { LOG.debug("Moving the files {} to archive", filesToRemove); - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); + getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), + filesToRemove); } } @@ -647,7 +636,7 @@ private List openStoreFiles(Collection files, boolean @Override public void refreshStoreFiles() throws IOException { - Collection newFiles = fs.getStoreFiles(getColumnFamilyName()); + Collection newFiles = getRegionFileSystem().getStoreFiles(getColumnFamilyName()); refreshStoreFilesInternal(newFiles); } @@ -658,7 +647,7 @@ public void refreshStoreFiles() throws IOException { public void refreshStoreFiles(Collection newFiles) throws IOException { List storeFiles = new ArrayList<>(newFiles.size()); for (String file : newFiles) { - storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file)); + storeFiles.add(getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file)); } refreshStoreFilesInternal(storeFiles); } @@ -735,7 +724,8 @@ protected HStoreFile createStoreFileAndReader(final Path p) throws IOException { private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException { info.setRegionCoprocessorHost(this.region.getCoprocessorHost()); - HStoreFile storeFile = new HStoreFile(info, this.family.getBloomFilterType(), this.cacheConf); + HStoreFile storeFile = new HStoreFile(info, getColumnFamilyDescriptor().getBloomFilterType(), + getCacheConfig()); storeFile.initReader(); return storeFile; } @@ -818,7 +808,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { LOG.info("Validating hfile at " + srcPath + " for inclusion in " + this); FileSystem srcFs = srcPath.getFileSystem(conf); srcFs.access(srcPath, FsAction.READ_WRITE); - reader = HFile.createReader(srcFs, srcPath, cacheConf, isPrimaryReplicaStore(), conf); + reader = HFile.createReader(srcFs, srcPath, getCacheConfig(), isPrimaryReplicaStore(), conf); Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); @@ -855,7 +845,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { do { Cell cell = scanner.getCell(); if (prevCell != null) { - if (comparator.compareRows(prevCell, cell) > 0) { + if (getComparator().compareRows(prevCell, cell) > 0) { throw new InvalidHFileException("Previous row is greater than" + " current row: path=" + srcPath + " previous=" + CellUtil.getCellKeyAsString(prevCell) + " current=" @@ -892,13 +882,13 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { */ public Pair preBulkLoadHFile(String srcPathStr, long seqNum) throws IOException { Path srcPath = new Path(srcPathStr); - return fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); + return getRegionFileSystem().bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); } public Path bulkLoadHFile(byte[] family, String srcPathStr, Path dstPath) throws IOException { Path srcPath = new Path(srcPathStr); try { - fs.commitStoreFile(srcPath, dstPath); + getRegionFileSystem().commitStoreFile(srcPath, dstPath); } finally { if (this.getCoprocessorHost() != null) { this.getCoprocessorHost().postCommitStoreFile(family, srcPath, dstPath); @@ -964,8 +954,8 @@ 
public ImmutableCollection close() throws IOException { storeEngine.getStoreFileManager().clearCompactedFiles(); // clear the compacted files if (CollectionUtils.isNotEmpty(compactedfiles)) { - removeCompactedfiles(compactedfiles, cacheConf != null ? - cacheConf.shouldEvictOnClose() : true); + removeCompactedfiles(compactedfiles, getCacheConfig() != null ? + getCacheConfig().shouldEvictOnClose() : true); } if (!result.isEmpty()) { // initialize the thread pool for closing store files in parallel. @@ -981,7 +971,7 @@ public ImmutableCollection close() throws IOException { @Override public Void call() throws IOException { boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true; f.closeStoreFile(evictOnClose); return null; } @@ -1092,7 +1082,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { FileSystem srcFs = path.getFileSystem(conf); srcFs.access(path, FsAction.READ_WRITE); try (HFile.Reader reader = - HFile.createReader(srcFs, path, cacheConf, isPrimaryReplicaStore(), conf)) { + HFile.createReader(srcFs, path, getCacheConfig(), isPrimaryReplicaStore(), conf)) { Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); Optional lk = reader.getLastKey(); @@ -1104,7 +1094,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { } } - Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path); + Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path); HStoreFile sf = createStoreFileAndReader(dstPath); StoreFileReader r = sf.getReader(); this.storeSize.addAndGet(r.length()); @@ -1129,7 +1119,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status) throws IOException { // Write-out finished successfully, move into the right spot - Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path); + Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path); status.setStatus("Flushing " + this + ": reopening flushed file"); HStoreFile sf = createStoreFileAndReader(dstPath); @@ -1167,12 +1157,13 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm boolean shouldDropBehind, long totalCompactedFilesSize, String fileStoragePolicy) throws IOException { // creating new cache config for each new writer + final CacheConfig cacheConf = getCacheConfig(); final CacheConfig writerCacheConf = new CacheConfig(cacheConf); if (isCompaction) { // Don't cache data on write on compactions, unless specifically configured to do so // Cache only when total file size remains lower than configured threshold final boolean cacheCompactedBlocksOnWrite = - cacheConf.shouldCacheCompactedBlocksOnWrite(); + getCacheConfig().shouldCacheCompactedBlocksOnWrite(); // if data blocks are to be cached on write // during compaction, we should forcefully // cache index and bloom blocks as well @@ -1206,53 +1197,48 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm } } } - InetSocketAddress[] favoredNodes = null; - if (region.getRegionServerServices() != null) { - favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion( - region.getRegionInfo().getEncodedName()); - } + Encryption.Context encryptionContext = storeContext.getEncryptionContext(); HFileContext hFileContext = 
createFileContext(compression, includeMVCCReadpoint, includesTag, - cryptoContext); - Path familyTempDir = new Path(fs.getTempDir(), family.getNameAsString()); - StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, - this.getFileSystem()) - .withOutputDir(familyTempDir) - .withBloomType(family.getBloomFilterType()) - .withMaxKeyCount(maxKeyCount) - .withFavoredNodes(favoredNodes) - .withFileContext(hFileContext) - .withShouldDropCacheBehind(shouldDropBehind) - .withCompactedFilesSupplier(this::getCompactedFiles) - .withFileStoragePolicy(fileStoragePolicy); + encryptionContext); + Path familyTempDir = new Path(getRegionFileSystem().getTempDir(), getColumnFamilyName()); + StoreFileWriter.Builder builder = + new StoreFileWriter.Builder(conf, writerCacheConf, getFileSystem()) + .withOutputDir(familyTempDir) + .withBloomType(storeContext.getBloomFilterType()) + .withMaxKeyCount(maxKeyCount) + .withFavoredNodes(storeContext.getFavoredNodes()) + .withFileContext(hFileContext) + .withShouldDropCacheBehind(shouldDropBehind) + .withCompactedFilesSupplier(storeContext.getCompactedFilesSupplier()) + .withFileStoragePolicy(fileStoragePolicy); return builder.build(); } private HFileContext createFileContext(Compression.Algorithm compression, - boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) { + boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) { if (compression == null) { compression = HFile.DEFAULT_COMPRESSION_ALGORITHM; } + ColumnFamilyDescriptor family = getColumnFamilyDescriptor(); HFileContext hFileContext = new HFileContextBuilder() - .withIncludesMvcc(includeMVCCReadpoint) - .withIncludesTags(includesTag) - .withCompression(compression) - .withCompressTags(family.isCompressTags()) - .withChecksumType(checksumType) - .withBytesPerCheckSum(bytesPerChecksum) - .withBlockSize(blocksize) - .withHBaseCheckSum(true) - .withDataBlockEncoding(family.getDataBlockEncoding()) - .withEncryptionContext(cryptoContext) - .withCreateTime(EnvironmentEdgeManager.currentTime()) - .withColumnFamily(family.getName()) - .withTableName(region.getTableDescriptor() - .getTableName().getName()) - .withCellComparator(this.comparator) - .build(); + .withIncludesMvcc(includeMVCCReadpoint) + .withIncludesTags(includesTag) + .withCompression(compression) + .withCompressTags(family.isCompressTags()) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) + .withBlockSize(family.getBlocksize()) + .withHBaseCheckSum(true) + .withDataBlockEncoding(family.getDataBlockEncoding()) + .withEncryptionContext(encryptionContext) + .withCreateTime(EnvironmentEdgeManager.currentTime()) + .withColumnFamily(getColumnFamilyDescriptor().getName()) + .withTableName(getTableName().getName()) + .withCellComparator(getComparator()) + .build(); return hFileContext; } - private long getTotalSize(Collection sfs) { return sfs.stream().mapToLong(sf -> sf.getReader().length()).sum(); } @@ -1529,7 +1515,7 @@ public List compact(CompactionContext compaction, // Ready to go. Have list of files to compact. 
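    // Illustrative sketch only (not authoritative; the argument values below are
    // assumptions chosen for illustration): the storeContext consulted throughout
    // this class would be assembled once, at store-construction time, via the
    // builder that the StoreContext class below defines:
    //
    //   StoreContext ctx = StoreContext.getBuilder()
    //       .withBlockSize(family.getBlocksize())
    //       .withEncryptionContext(encryptionContext)
    //       .withCacheConfig(cacheConf)
    //       .withCellComparator(region.getCellComparator())
    //       .withBloomType(family.getBloomFilterType())
    //       .withCompactedFilesSupplier(this::getCompactedFiles)
    //       .withColumnFamilyDescriptor(family)
    //       .withRegionCoprocessorHost(region.getCoprocessorHost())
    //       .build();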
LOG.info("Starting compaction of " + filesToCompact + - " into tmpdir=" + fs.getTempDir() + ", totalSize=" + + " into tmpdir=" + getRegionFileSystem().getTempDir() + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1)); return doCompaction(cr, filesToCompact, user, compactionStartTime, @@ -1579,7 +1565,7 @@ private void setStoragePolicyFromFileName(List newFiles) throws IOExceptio String prefix = HConstants.STORAGE_POLICY_PREFIX; for (Path newFile : newFiles) { if (newFile.getParent().getName().startsWith(prefix)) { - CommonFSUtils.setStoragePolicy(fs.getFileSystem(), newFile, + CommonFSUtils.setStoragePolicy(getRegionFileSystem().getFileSystem(), newFile, newFile.getParent().getName().substring(prefix.length())); } } @@ -1604,7 +1590,7 @@ private List moveCompactedFilesIntoPlace(CompactionRequestImpl cr, HStoreFile moveFileIntoPlace(Path newFile) throws IOException { validateStoreFile(newFile); // Move the file into the right spot - Path destPath = fs.commitStoreFile(getColumnFamilyName(), newFile); + Path destPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), newFile); return createStoreFileAndReader(destPath); } @@ -1624,8 +1610,8 @@ private void writeCompactionWalRecord(Collection filesCompacted, newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList()); RegionInfo info = this.region.getRegionInfo(); CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info, - family.getName(), inputPaths, outputPaths, - fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString())); + getColumnFamilyDescriptor().getName(), inputPaths, outputPaths, + getRegionFileSystem().getStoreDir(getColumnFamilyDescriptor().getNameAsString())); // Fix reaching into Region to get the maxWaitForSeqId. // Does this method belong in Region altogether given it is making so many references up there? // Could be Region#writeCompactionMarker(compactionDescriptor); @@ -1752,7 +1738,7 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick String familyName = this.getColumnFamilyName(); Set inputFiles = new HashSet<>(); for (String compactionInput : compactionInputs) { - Path inputPath = fs.getStoreFilePath(familyName, compactionInput); + Path inputPath = getRegionFileSystem().getStoreFilePath(familyName, compactionInput); inputFiles.add(inputPath.getName()); } @@ -1772,7 +1758,8 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick compactionOutputs.remove(sf.getPath().getName()); } for (String compactionOutput : compactionOutputs) { - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), compactionOutput); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); outputStoreFiles.add(storeFile); } @@ -2092,7 +2079,7 @@ int versionsToReturn(final int wantedVersions) { throw new IllegalArgumentException("Number of versions must be > 0"); } // Make sure we do not return more than maximum versions for this store. - int maxVersions = this.family.getMaxVersions(); + int maxVersions = getColumnFamilyDescriptor().getMaxVersions(); return wantedVersions > maxVersions ? 
maxVersions: wantedVersions; } @@ -2367,7 +2354,7 @@ public RegionCoprocessorHost getCoprocessorHost() { @Override public RegionInfo getRegionInfo() { - return this.fs.getRegionInfo(); + return getRegionFileSystem().getRegionInfo(); } @Override @@ -2509,7 +2496,8 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) List storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); HStore.this.storeSize.addAndGet(storeFile.getReader().length()); @@ -2559,7 +2547,7 @@ public boolean needsCompaction() { * @return cache configuration for this Store. */ public CacheConfig getCacheConfig() { - return this.cacheConf; + return storeContext.getCacheConf(); } public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); @@ -2573,12 +2561,12 @@ public CacheConfig getCacheConfig() { @Override public long heapSize() { MemStoreSize memstoreSize = this.memstore.size(); - return DEEP_OVERHEAD + memstoreSize.getHeapSize(); + return DEEP_OVERHEAD + memstoreSize.getHeapSize() + storeContext.heapSize(); } @Override public CellComparator getComparator() { - return comparator; + return storeContext.getComparator(); } public ScanInfo getScanInfo() { @@ -2652,7 +2640,7 @@ protected OffPeakHours getOffPeakHours() { public void onConfigurationChange(Configuration conf) { this.conf = new CompoundConfiguration() .add(conf) - .addBytesMap(family.getValues()); + .addBytesMap(getColumnFamilyDescriptor().getValues()); this.storeEngine.compactionPolicy.setConf(conf); this.offPeakHours = OffPeakHours.getInstance(conf); } @@ -2784,8 +2772,8 @@ private void removeCompactedfiles(Collection compactedfiles, boolean LOG.debug("Moving the files {} to archive", filesToRemove); // Only if this is successful it has to be removed try { - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), - filesToRemove); + getRegionFileSystem() + .removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); } catch (FailedArchiveException fae) { // Even if archiving some files failed, we still need to clear out any of the // files which were successfully archived. Otherwise we will receive a diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java new file mode 100644 index 000000000000..26233505db73 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.function.Supplier; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This carries the immutable information and references on some of the meta data about the HStore. + * This meta data can be used across the HFileWriter/Readers and other HStore consumers without the + * need of passing around the complete store. + */ +@InterfaceAudience.Private +public final class StoreContext implements HeapSize { + public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); + + private final int blockSize; + private final Encryption.Context encryptionContext; + private final CacheConfig cacheConf; + private final HRegionFileSystem regionFileSystem; + private final CellComparator comparator; + private final BloomType bloomFilterType; + private final Supplier> compactedFilesSupplier; + private final Supplier favoredNodesSupplier; + private final ColumnFamilyDescriptor family; + private final Path familyStoreDirectoryPath; + private final RegionCoprocessorHost coprocessorHost; + + private StoreContext(Builder builder) { + this.blockSize = builder.blockSize; + this.encryptionContext = builder.encryptionContext; + this.cacheConf = builder.cacheConf; + this.regionFileSystem = builder.regionFileSystem; + this.comparator = builder.comparator; + this.bloomFilterType = builder.bloomFilterType; + this.compactedFilesSupplier = builder.compactedFilesSupplier; + this.favoredNodesSupplier = builder.favoredNodesSupplier; + this.family = builder.family; + this.familyStoreDirectoryPath = builder.familyStoreDirectoryPath; + this.coprocessorHost = builder.coprocessorHost; + } + + public int getBlockSize() { + return blockSize; + } + + public Encryption.Context getEncryptionContext() { + return encryptionContext; + } + + public CacheConfig getCacheConf() { + return cacheConf; + } + + public HRegionFileSystem getRegionFileSystem() { + return regionFileSystem; + } + + public CellComparator getComparator() { + return comparator; + } + + public BloomType getBloomFilterType() { + return bloomFilterType; + } + + public Supplier> getCompactedFilesSupplier() { + return compactedFilesSupplier; + } + + public InetSocketAddress[] getFavoredNodes() { + return favoredNodesSupplier.get(); + } + + public ColumnFamilyDescriptor getFamily() { + return family; + } + + public Path getFamilyStoreDirectoryPath() { + return familyStoreDirectoryPath; + } + + public RegionCoprocessorHost getCoprocessorHost() { + return coprocessorHost; + } + + public static Builder getBuilder() { + return new Builder(); + } + + @Override + public long heapSize() { + return FIXED_OVERHEAD; + } + + public static class Builder { + private 
int blockSize; + private Encryption.Context encryptionContext; + private CacheConfig cacheConf; + private HRegionFileSystem regionFileSystem; + private CellComparator comparator; + private BloomType bloomFilterType; + private Supplier> compactedFilesSupplier; + private Supplier favoredNodesSupplier; + private ColumnFamilyDescriptor family; + private Path familyStoreDirectoryPath; + private RegionCoprocessorHost coprocessorHost; + + public Builder withBlockSize(int blockSize) { + this.blockSize = blockSize; + return this; + } + + public Builder withEncryptionContext(Encryption.Context encryptionContext) { + this.encryptionContext = encryptionContext; + return this; + } + + public Builder withCacheConfig(CacheConfig cacheConf) { + this.cacheConf = cacheConf; + return this; + } + + public Builder withRegionFileSystem(HRegionFileSystem regionFileSystem) { + this.regionFileSystem = regionFileSystem; + return this; + } + + public Builder withCellComparator(CellComparator comparator) { + this.comparator = comparator; + return this; + } + + public Builder withBloomType(BloomType bloomFilterType) { + this.bloomFilterType = bloomFilterType; + return this; + } + + public Builder withCompactedFilesSupplier(Supplier> + compactedFilesSupplier) { + this.compactedFilesSupplier = compactedFilesSupplier; + return this; + } + + public Builder withFavoredNodesSupplier(Supplier favoredNodesSupplier) { + this.favoredNodesSupplier = favoredNodesSupplier; + return this; + } + + public Builder withColumnFamilyDescriptor(ColumnFamilyDescriptor family) { + this.family = family; + return this; + } + + public Builder withFamilyStoreDirectoryPath(Path familyStoreDirectoryPath) { + this.familyStoreDirectoryPath = familyStoreDirectoryPath; + return this; + } + + public Builder withRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) { + this.coprocessorHost = coprocessorHost; + return this; + } + + public StoreContext build() { + return new StoreContext(this); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 0e4f6c2bb8a4..ac5955feca7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -24,9 +24,13 @@ import java.util.OptionalInt; import java.util.OptionalLong; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -136,4 +140,25 @@ static Optional getSplitPoint(Collection storefiles, return largestFile.isPresent() ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) : Optional.empty(); } + + /** + * Returns the configured checksum algorithm. + * @param conf The configuration + * @return The checksum algorithm that is set in the configuration + */ + public static ChecksumType getChecksumType(Configuration conf) { + return ChecksumType.nameToType( + conf.get(HConstants.CHECKSUM_TYPE_NAME, ChecksumType.getDefaultChecksumType().getName())); + } + + /** + * Returns the configured bytesPerChecksum value. 
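+ * <p>
+ * Hedged usage sketch (illustrative only; it simply mirrors how this patch's
+ * {@code createFileContext} combines the two checksum helpers):
+ * <pre>
+ *   HFileContext ctx = new HFileContextBuilder()
+ *     .withChecksumType(StoreUtils.getChecksumType(conf))
+ *     .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
+ *     .build();
+ * </pre>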
+ * @param conf The configuration + * @return The bytesPerChecksum that is set in the configuration + */ + public static int getBytesPerChecksum(Configuration conf) { + return conf.getInt(HConstants.BYTES_PER_CHECKSUM, + HFile.DEFAULT_BYTES_PER_CHECKSUM); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index ec9a59c7bf5a..b0b086e145a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -83,9 +83,9 @@ import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.FsDelegationToken; import org.apache.hadoop.hbase.util.Bytes; @@ -749,8 +749,8 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Algorithm compression = familyDescriptor.getCompressionType(); BloomType bloomFilterType = familyDescriptor.getBloomFilterType(); HFileContext hFileContext = new HFileContextBuilder().withCompression(compression) - .withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blocksize) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize) .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) .build(); halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index 9623bd1c7220..50bc5fe62fb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.CellSet; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.EntryBuffers.RegionEntryBuffer; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -199,8 +199,8 @@ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String r new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) .withOutputDir(outputDir); HFileContext hFileContext = new HFileContextBuilder(). - withChecksumType(HStore.getChecksumType(walSplitter.conf)). - withBytesPerCheckSum(HStore.getBytesPerChecksum(walSplitter.conf)). + withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)). + withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)). withCellComparator(isMetaTable? 
MetaCellComparator.META_COMPARATOR: CellComparatorImpl.COMPARATOR).build(); return writerBuilder.withFileContext(hFileContext).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 3d713052559e..3f326a30cfdf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.MemStoreCompactor; import org.apache.hadoop.hbase.regionserver.MutableSegment; import org.apache.hadoop.hbase.regionserver.Segment; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker.NonSyncTimeRangeTracker; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker.SyncTimeRangeTracker; import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector; @@ -606,7 +607,7 @@ public void testObjectSize() throws IOException { @Test public void testAutoCalcFixedOverHead() { Class[] classList = new Class[] { HFileContext.class, HRegion.class, BlockCacheKey.class, - HFileBlock.class, HStore.class, LruBlockCache.class }; + HFileBlock.class, HStore.class, LruBlockCache.class, StoreContext.class }; for (Class cl : classList) { // do estimate in advance to ensure class is loaded ClassSize.estimateBase(cl, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index b56f96a51149..74f240a60198 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -7645,7 +7645,7 @@ protected List doCompaction(CompactionRequestImpl cr, LOG.warn("hbase.hstore.compaction.complete is set to false"); List sfs = new ArrayList<>(newFiles.size()); final boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true; for (Path newFile : newFiles) { // Create storefile around what we wrote with a reader on it. 
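        // Descriptive aside (grounded in HStore's own close()/openStoreFiles() paths
        // in this patch): CacheConfig can legitimately be null in test setups, so the
        // guard above defaults evictOnClose to true, evicting a closed file's blocks
        // rather than leaving them stranded in the block cache.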
HStoreFile sf = createStoreFileAndReader(newFile); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java index 12cf57671f9c..88f201efff6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java @@ -221,8 +221,8 @@ private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception { .withIncludesTags(true) .withCompression(compression) .withCompressTags(family.isCompressTags()) - .withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) .withBlockSize(family.getBlocksize()) .withHBaseCheckSum(true) .withDataBlockEncoding(family.getDataBlockEncoding()) From ab4011a0053cdbe0527113c385859cdd0fc4e941 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 12 Jan 2021 16:08:54 +0800 Subject: [PATCH 637/769] Revert "HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml" This reverts commit 49aba571813f649a2ff4482a2209ee9910cc72c3. --- .../src/main/resources/hbase-default.xml | 4 +- .../hadoop/hbase/TestHBaseConfiguration.java | 17 -------- .../src/test/resources/hdfs-default.xml | 42 ------------------- .../src/test/resources/hdfs-scr-enabled.xml | 42 ------------------- 4 files changed, 2 insertions(+), 103 deletions(-) delete mode 100644 hbase-common/src/test/resources/hdfs-default.xml delete mode 100644 hbase-common/src/test/resources/hdfs-scr-enabled.xml diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 20f3881edb2c..9092dd147198 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the important. dfs.client.read.shortcircuit - + false If set to true, this configuration parameter enables short-circuit local reads. @@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the important. 
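  <!-- Hedged illustration (values borrowed from the hdfs-scr-enabled.xml test
       resource later in this series, not a recommendation): enabling short-circuit
       reads pairs dfs.client.read.shortcircuit=true with a concrete
       dfs.domain.socket.path such as /var/lib/hadoop-hdfs/dn_socket. -->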
dfs.domain.socket.path - + none This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients, if diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 1144f1daf351..6a0b4283ed03 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -115,23 +115,6 @@ public void testSecurityConfCaseInsensitive() { conf.set("hbase.security.authentication", "KERBeros"); Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); } - - @Test - public void testGetConfigOfShortcircuitRead() throws Exception { - Configuration conf = HBaseConfiguration.create(); - Configuration.addDefaultResource("hdfs-default.xml"); - assertEquals("hdfs-default.xml", - conf.getPropertySources("dfs.client.read.shortcircuit")[0]); - assertEquals("false", conf.get("dfs.client.read.shortcircuit")); - assertNull(conf.get("dfs.domain.socket.path")); - Configuration.addDefaultResource("hdfs-scr-enabled.xml"); - assertEquals("hdfs-scr-enabled.xml", - conf.getPropertySources("dfs.client.read.shortcircuit")[0]); - assertEquals("hdfs-scr-enabled.xml", - conf.getPropertySources("dfs.domain.socket.path")[0]); - assertEquals("true", conf.get("dfs.client.read.shortcircuit")); - assertEquals("/var/lib/hadoop-hdfs/dn_socket", conf.get("dfs.domain.socket.path")); - } private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = diff --git a/hbase-common/src/test/resources/hdfs-default.xml b/hbase-common/src/test/resources/hdfs-default.xml deleted file mode 100644 index fdb3c36edc87..000000000000 --- a/hbase-common/src/test/resources/hdfs-default.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - dfs.client.read.shortcircuit - false - - If set to true, this configuration parameter enables short-circuit local - reads. - - - - dfs.domain.socket.path - - - Optional. This is a path to a UNIX domain socket that will be used for - communication between the DataNode and local HDFS clients. - If the string "_PORT" is present in this path, it will be replaced by the - TCP port of the DataNode. - - - diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml deleted file mode 100644 index 8594494782c5..000000000000 --- a/hbase-common/src/test/resources/hdfs-scr-enabled.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - dfs.client.read.shortcircuit - true - - If set to true, this configuration parameter enables short-circuit local - reads. - - - - dfs.domain.socket.path - /var/lib/hadoop-hdfs/dn_socket - - Optional. This is a path to a UNIX domain socket that will be used for - communication between the DataNode and local HDFS clients. - If the string "_PORT" is present in this path, it will be replaced by the - TCP port of the DataNode. 
- - - From 788fa3ccf8ece2bf88c95cfbb2fac8f2b4ff2919 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 13 Jan 2021 05:01:26 +0530 Subject: [PATCH 638/769] HBASE-25211 : Rack awareness in RegionMover (#2795) Signed-off-by: Andrew Purtell --- .../hadoop/hbase/master/RackManager.java | 3 - .../apache/hadoop/hbase/util/RegionMover.java | 68 ++++++- .../hadoop/hbase/util/TestRegionMover3.java | 188 ++++++++++++++++++ 3 files changed, 254 insertions(+), 5 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java index 3ed20065a672..54ccac0cb629 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java @@ -22,8 +22,6 @@ import java.util.List; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ -36,7 +34,6 @@ */ @InterfaceAudience.Private public class RackManager { - private static final Logger LOG = LoggerFactory.getLogger(RackManager.class); public static final String UNKNOWN_RACK = "Unknown Rack"; private DNSToSwitchMapping switchMapping; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 08042efda68f..210e9e17a39f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.DoNotRetryRegionException; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; @@ -86,6 +87,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { public static final int DEFAULT_MOVE_RETRIES_MAX = 5; public static final int DEFAULT_MOVE_WAIT_MAX = 60; public static final int DEFAULT_SERVERSTART_WAIT_MAX = 180; + private final RackManager rackManager; private static final Logger LOG = LoggerFactory.getLogger(RegionMover.class); @@ -114,9 +116,16 @@ private RegionMover(RegionMoverBuilder builder) throws IOException { setConf(builder.conf); this.conn = ConnectionFactory.createConnection(conf); this.admin = conn.getAdmin(); + // Only while running unit tests, builder.rackManager will not be null for the convenience of + // providing custom rackManager. Otherwise for regular workflow/user triggered action, + // builder.rackManager is supposed to be null. Hence, setter of builder.rackManager is + // provided as @InterfaceAudience.Private and it is commented that this is just + // to be used by unit test. + rackManager = builder.rackManager == null ? 
new RackManager(conf) : builder.rackManager; } private RegionMover() { + rackManager = new RackManager(conf); } @Override @@ -143,6 +152,7 @@ public static class RegionMoverBuilder { @InterfaceAudience.Private final int port; private final Configuration conf; + private RackManager rackManager; public RegionMoverBuilder(String hostname) { this(hostname, createConf()); @@ -245,6 +255,19 @@ public RegionMoverBuilder timeout(int timeout) { return this; } + /** + * Set specific rackManager implementation. + * This setter method is for testing purpose only. + * + * @param rackManager rackManager impl + * @return RegionMoverBuilder object + */ + @InterfaceAudience.Private + public RegionMoverBuilder rackManager(RackManager rackManager) { + this.rackManager = rackManager; + return this; + } + /** * This method builds the appropriate RegionMover object which can then be used to load/unload * using load and unload methods @@ -328,9 +351,31 @@ private void loadRegions(List regionsToMove) * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions * to hostnames provided in {@link #designatedFile} + * * @return true if unloading succeeded, false otherwise */ public boolean unload() throws InterruptedException, ExecutionException, TimeoutException { + return unloadRegions(false); + } + + /** + * Unload regions from given {@link #hostname} using ack/noAck mode and {@link #maxthreads}.In + * noAck mode we do not make sure that region is successfully online on the target region + * server,hence it is best effort.We do not unload regions to hostnames given in + * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions + * to hostnames provided in {@link #designatedFile}. + * While unloading regions, destination RegionServers are selected from different rack i.e + * regions should not move to any RegionServers that belong to same rack as source RegionServer. + * + * @return true if unloading succeeded, false otherwise + */ + public boolean unloadFromRack() + throws InterruptedException, ExecutionException, TimeoutException { + return unloadRegions(true); + } + + private boolean unloadRegions(boolean unloadFromRack) throws InterruptedException, + ExecutionException, TimeoutException { deleteFile(this.filename); ExecutorService unloadPool = Executors.newFixedThreadPool(1); Future unloadTask = unloadPool.submit(() -> { @@ -355,6 +400,23 @@ public boolean unload() throws InterruptedException, ExecutionException, Timeout // Remove RS present in the exclude file includeExcludeRegionServers(excludeFile, regionServers, false); + if (unloadFromRack) { + // remove regionServers that belong to same rack (as source host) since the goal is to + // unload regions from source regionServer to destination regionServers + // that belong to different rack only. 
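    // Usage sketch (illustrative; the same API is exercised by TestRegionMover3 in
    // this patch -- host and conf below are placeholders):
    //
    //   try (RegionMover rm = new RegionMover.RegionMoverBuilder(host, conf)
    //       .ack(true).maxthreads(8).build()) {
    //     rm.unloadFromRack(); // drain regions only to RegionServers on other racks
    //   }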
+ String sourceRack = rackManager.getRack(server); + List racks = rackManager.getRack(regionServers); + Iterator iterator = regionServers.iterator(); + int i = 0; + while (iterator.hasNext()) { + iterator.next(); + if (racks.size() > i && racks.get(i) != null && racks.get(i).equals(sourceRack)) { + iterator.remove(); + } + i++; + } + } + // Remove decommissioned RS Set decommissionedRS = new HashSet<>(admin.listDecommissionedRegionServers()); if (CollectionUtils.isNotEmpty(decommissionedRS)) { @@ -651,7 +713,7 @@ private void stripMaster(List regionServers) throws IOException { private ServerName stripServer(List regionServers, String hostname, int port) { for (Iterator iter = regionServers.iterator(); iter.hasNext();) { ServerName server = iter.next(); - if (server.getAddress().getHostname().equalsIgnoreCase(hostname) && + if (server.getAddress().getHostName().equalsIgnoreCase(hostname) && server.getAddress().getPort() == port) { iter.remove(); return server; @@ -663,7 +725,7 @@ private ServerName stripServer(List regionServers, String hostname, @Override protected void addOptions() { this.addRequiredOptWithArg("r", "regionserverhost", "region server |"); - this.addRequiredOptWithArg("o", "operation", "Expected: load/unload"); + this.addRequiredOptWithArg("o", "operation", "Expected: load/unload/unload_from_rack"); this.addOptWithArg("m", "maxthreads", "Define the maximum number of threads to use to unload and reload the regions"); this.addOptWithArg("x", "excludefile", @@ -716,6 +778,8 @@ protected int doWork() throws Exception { success = rm.load(); } else if (loadUnload.equalsIgnoreCase("unload")) { success = rm.unload(); + } else if (loadUnload.equalsIgnoreCase("unload_from_rack")) { + success = rm.unloadFromRack(); } else { printUsage(); success = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java new file mode 100644 index 000000000000..1903fa6bf5b6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.RackManager; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + + +@Category({ MiscTests.class, LargeTests.class}) +public class TestRegionMover3 { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRegionMover3.class); + + @Rule + public TestName name = new TestName(); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static ServerName rs0; + private static ServerName rs1; + private static ServerName rs2; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(3); + MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + rs0 = cluster.getRegionServer(0).getServerName(); + rs1 = cluster.getRegionServer(1).getServerName(); + rs2 = cluster.getRegionServer(2).getServerName(); + TEST_UTIL.getAdmin().balancerSwitch(false, true); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setUp() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + int startKey = 0; + int endKey = 80000; + TEST_UTIL.getAdmin().createTable(tableDesc, Bytes.toBytes(startKey), Bytes.toBytes(endKey), 9); + } + + @Test + public void testRegionUnloadWithRack() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + Admin admin = TEST_UTIL.getAdmin(); + Table table = TEST_UTIL.getConnection().getTable(tableName); + List puts = IntStream.range(10, 50000) + .mapToObj(i -> new Put(Bytes.toBytes(i)) + .addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))) + .collect(Collectors.toList()); + table.put(puts); + admin.flush(tableName); + admin.compact(tableName); + Thread.sleep(3000); + HRegionServer hRegionServer0 = cluster.getRegionServer(0); + HRegionServer hRegionServer1 = cluster.getRegionServer(1); + HRegionServer hRegionServer2 = cluster.getRegionServer(2); + int numRegions0 = hRegionServer0.getNumberOfOnlineRegions(); + int numRegions1 = 
hRegionServer1.getNumberOfOnlineRegions(); + int numRegions2 = hRegionServer2.getNumberOfOnlineRegions(); + + Assert.assertTrue(numRegions0 >= 3); + Assert.assertTrue(numRegions1 >= 3); + Assert.assertTrue(numRegions2 >= 3); + int totalRegions = numRegions0 + numRegions1 + numRegions2; + + // source RS: rs0 + String sourceRSName = rs0.getAddress().toString(); + + // move all regions from rs1 to rs0 + for (HRegion region : hRegionServer1.getRegions()) { + TEST_UTIL.getAdmin().move(region.getRegionInfo().getEncodedNameAsBytes(), rs0); + } + TEST_UTIL.waitFor(5000, () -> { + int newNumRegions0 = hRegionServer0.getNumberOfOnlineRegions(); + int newNumRegions1 = hRegionServer1.getNumberOfOnlineRegions(); + return newNumRegions1 == 0 && newNumRegions0 == (numRegions0 + numRegions1); + }); + + // regionMover obj on rs0. While unloading regions from rs0 + // with default rackManager, which resolves "/default-rack" for each server, no region + // is moved while using unloadFromRack() as all rs belong to same rack. + RegionMover.RegionMoverBuilder rmBuilder = + new RegionMover.RegionMoverBuilder(sourceRSName, TEST_UTIL.getConfiguration()) + .ack(true) + .maxthreads(8); + try (RegionMover regionMover = rmBuilder.build()) { + regionMover.unloadFromRack(); + int newNumRegions0 = hRegionServer0.getNumberOfOnlineRegions(); + int newNumRegions1 = hRegionServer1.getNumberOfOnlineRegions(); + int newNumRegions2 = hRegionServer2.getNumberOfOnlineRegions(); + Assert.assertEquals(0, newNumRegions1); + Assert.assertEquals(totalRegions, newNumRegions0 + newNumRegions2); + } + + // use custom rackManager, which resolves "rack-1" for rs0 and rs1, + // while "rack-2" for rs2. Hence, unloadFromRack() from rs0 should move all + // regions that belong to rs0 to rs2 only, and nothing should be moved to rs1 + // as rs0 and rs1 belong to same rack. + rmBuilder.rackManager(new MockRackManager()); + try (RegionMover regionMover = rmBuilder.build()) { + regionMover.unloadFromRack(); + int newNumRegions0 = hRegionServer0.getNumberOfOnlineRegions(); + int newNumRegions1 = hRegionServer1.getNumberOfOnlineRegions(); + int newNumRegions2 = hRegionServer2.getNumberOfOnlineRegions(); + Assert.assertEquals(0, newNumRegions0); + Assert.assertEquals(0, newNumRegions1); + Assert.assertEquals(totalRegions, newNumRegions2); + } + + } + + private static class MockRackManager extends RackManager { + + private static final String RACK_2 = "rack-2"; + private static final String RACK_1 = "rack-1"; + + @Override + public String getRack(ServerName server) { + return rs2.equals(server) ? RACK_2 : RACK_1; + } + + @Override + public List getRack(List servers) { + List racks = new ArrayList<>(); + servers.forEach(serverName -> { + if (rs2.equals(serverName)) { + racks.add(RACK_2); + } else { + racks.add(RACK_1); + } + }); + return racks; + } + } + +} From 04be7e6749e90c4d21087b4fcd3bcca661683d34 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 13 Jan 2021 09:56:58 +0800 Subject: [PATCH 639/769] HBASE-25476 Enable error prone check in pre commit (#2860) Signed-off-by: Guanghao Zhang --- dev-support/Jenkinsfile_GitHub | 4 +++- dev-support/jenkins_precommit_github_yetus.sh | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/dev-support/Jenkinsfile_GitHub b/dev-support/Jenkinsfile_GitHub index d25386717739..a725f1dbce4f 100644 --- a/dev-support/Jenkinsfile_GitHub +++ b/dev-support/Jenkinsfile_GitHub @@ -38,7 +38,7 @@ pipeline { YETUS_DRIVER_REL = "${SRC_REL}/dev-support/jenkins_precommit_github_yetus.sh" // Branch or tag name. 
Yetus release tags are 'rel/X.Y.Z' YETUS_VERSION = 'rel/0.12.0' - GENERAL_CHECK_PLUGINS = 'all,-compile,-javac,-javadoc,-jira,-shadedjars,-unit' + GENERAL_CHECK_PLUGINS = 'all,-javadoc,-jira,-shadedjars,-unit' JDK_SPECIFIC_PLUGINS = 'compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars,unit' // output from surefire; sadly the archive function in yetus only works on file names. ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump' @@ -168,6 +168,7 @@ pipeline { BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}" DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}" YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}" + SKIP_ERRORPRONE = true } steps { dir("${SOURCEDIR}") { @@ -268,6 +269,7 @@ pipeline { BUILD_URL_ARTIFACTS = "artifact/${WORKDIR_REL}/${PATCH_REL}" DOCKERFILE = "${WORKDIR}/${DOCKERFILE_REL}" YETUS_DRIVER = "${WORKDIR}/${YETUS_DRIVER_REL}" + SKIP_ERRORPRONE = true } steps { dir("${SOURCEDIR}") { diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh index 1c489d6f28bb..5bb2b1b755a4 100755 --- a/dev-support/jenkins_precommit_github_yetus.sh +++ b/dev-support/jenkins_precommit_github_yetus.sh @@ -122,7 +122,10 @@ YETUS_ARGS+=("--whitespace-tabs-ignore-list=.*/generated/.*") YETUS_ARGS+=("--tests-filter=${TESTS_FILTER}") YETUS_ARGS+=("--personality=${SOURCEDIR}/dev-support/hbase-personality.sh") YETUS_ARGS+=("--quick-hadoopcheck") -YETUS_ARGS+=("--skip-errorprone") +if [[ "${SKIP_ERRORPRONE}" = "true" ]]; then + # skip error prone + YETUS_ARGS+=("--skip-errorprone") +fi # effectively treat dev-support as a custom maven module YETUS_ARGS+=("--skip-dirs=dev-support") # For testing with specific hadoop version. Activates corresponding profile in maven runs. From 983233cbc67a714d0ab9355b2f87a5b6660e2e1a Mon Sep 17 00:00:00 2001 From: caroliney14 Date: Tue, 12 Jan 2021 22:40:12 -0800 Subject: [PATCH 640/769] HBASE-25329 Dump ritsOverThreshold in logs (#2761) Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani --- .../hadoop/hbase/master/assignment/AssignmentManager.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 4d0e165456d1..1eb39028f454 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -1425,6 +1425,13 @@ protected void update(final AssignmentManager am) { this.statTimestamp = EnvironmentEdgeManager.currentTime(); update(regionStates.getRegionsStateInTransition(), statTimestamp); update(regionStates.getRegionFailedOpen(), statTimestamp); + + if (LOG.isDebugEnabled() && ritsOverThreshold != null && !ritsOverThreshold.isEmpty()) { + LOG.debug("RITs over threshold: {}", + ritsOverThreshold.entrySet().stream() + .map(e -> e.getKey() + ":" + e.getValue().getState().name()) + .collect(Collectors.joining("\n"))); + } } private void update(final Collection regions, final long currentTime) { From cebcafb8ed24fcec029bed061ac0b83bcfa85265 Mon Sep 17 00:00:00 2001 From: Mohammad Arshad Date: Wed, 13 Jan 2021 23:19:38 +0530 Subject: [PATCH 641/769] HBASE-25503: HBase code download is failing on windows with invalid path error (#2880) --- ...HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf} | Bin 1 file changed, 0 insertions(+), 0 deletions(-) rename 
dev-support/design-docs/{HBASE-18070-ROOT_hbase:meta_Region_Replicas.pdf => HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf} (100%) diff --git a/dev-support/design-docs/HBASE-18070-ROOT_hbase:meta_Region_Replicas.pdf b/dev-support/design-docs/HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf similarity index 100% rename from dev-support/design-docs/HBASE-18070-ROOT_hbase:meta_Region_Replicas.pdf rename to dev-support/design-docs/HBASE-18070-ROOT_hbase_meta_Region_Replicas.pdf From 8ee0c1aa859a4a605e8548c633caac07217dec7c Mon Sep 17 00:00:00 2001 From: Pankaj Date: Thu, 14 Jan 2021 00:31:26 +0530 Subject: [PATCH 642/769] HBASE-25502 IntegrationTestMTTR fails with TableNotFoundException (#2879) --- .../java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index d15a9d650526..d9d8cbad39af 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -233,8 +233,7 @@ private static void setupTables() throws IOException { } // Create the table. If this fails then fail everything. - TableDescriptor tableDescriptor = util.getAdmin().getDescriptor(tableName); - TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor); + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); // Make the max file size huge so that splits don't happen during the test. builder.setMaxFileSize(Long.MAX_VALUE); From c2ee23ed6e4d9ec343e78dcf115f9d00f579a7db Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Thu, 14 Jan 2021 03:26:32 +0800 Subject: [PATCH 643/769] HBASE-25495 fix comment error of admin.rb (#2873) Co-authored-by: stevenxi Signed-off-by: Viraj Jasani --- hbase-shell/src/main/ruby/hbase/admin.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 6228ad78486d..d874d6337b84 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1414,7 +1414,7 @@ def create_namespace(namespace_name, *args) #---------------------------------------------------------------------------------------------- # modify a namespace def alter_namespace(namespace_name, *args) - # Fail if table name is not a string + # Fail if namespace name is not a string raise(ArgumentError, 'Namespace name must be of type String') unless namespace_name.is_a?(String) nsd = @admin.getNamespaceDescriptor(namespace_name) From c915d9746766bd0a0600d511266a0f7749b90dc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=94=B3=E8=83=9C=E5=88=A9?= <48829688+shenshengli@users.noreply.github.com> Date: Tue, 12 Jan 2021 09:06:13 -0500 Subject: [PATCH 644/769] HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml Revert of the revert -- re-applying HBASE-25449 with a change of renaming the test hdfs XML configuration file as it was adversely affecting tests using MiniDFS This reverts commit c218e576fe54df208e277365f1ac24f993f2a4b1. 
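For background, a hedged illustration of the collision the rename avoids (the
HdfsConfiguration detail is an assumption added here, not part of the original
message): Hadoop registers the literal resource name "hdfs-default.xml" as a
default resource, so a test resource with that exact name is picked up by every
Configuration instance on the classpath, including those backing MiniDFS:

    // Roughly what happens at class-load time inside Hadoop's HdfsConfiguration;
    // a test-classpath file named hdfs-default.xml therefore shadows the real defaults.
    Configuration.addDefaultResource("hdfs-default.xml");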
Co-authored-by: Josh Elser
Signed-off-by: Peter Somogyi
Signed-off-by: Michael Stack
Signed-off-by: Duo Zhang
---
 .../src/main/resources/hbase-default.xml      |  4 +-
 .../hadoop/hbase/TestHBaseConfiguration.java  | 17 ++++++++
 .../src/test/resources/hdfs-scr-disabled.xml  | 42 +++++++++++++++++++
 .../src/test/resources/hdfs-scr-enabled.xml   | 42 +++++++++++++++++++
 4 files changed, 103 insertions(+), 2 deletions(-)
 create mode 100644 hbase-common/src/test/resources/hdfs-scr-disabled.xml
 create mode 100644 hbase-common/src/test/resources/hdfs-scr-enabled.xml

diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 9092dd147198..20f3881edb2c 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.client.read.shortcircuit</name>
-    <value>false</value>
+    <value></value>
    <description>
      If set to true, this configuration parameter enables short-circuit local
      reads.
@@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
-    <value>none</value>
+    <value></value>
    <description>
      This is a path to a UNIX domain socket that will be used for
      communication between the DataNode and local HDFS clients, if
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b4283ed03..ffa94ba2d59f 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -116,6 +116,23 @@ public void testSecurityConfCaseInsensitive() {
     Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
 
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    Configuration.addDefaultResource("hdfs-scr-disabled.xml");
+    assertEquals("hdfs-scr-disabled.xml",
+      conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+    assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+    assertNull(conf.get("dfs.domain.socket.path"));
+    Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+    assertEquals("hdfs-scr-enabled.xml",
+      conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+    assertEquals("hdfs-scr-enabled.xml",
+      conf.getPropertySources("dfs.domain.socket.path")[0]);
+    assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+    assertEquals("/var/lib/hadoop-hdfs/dn_socket", conf.get("dfs.domain.socket.path"));
+  }
+
   private static class ReflectiveCredentialProviderClient {
     public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
       "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory";
diff --git a/hbase-common/src/test/resources/hdfs-scr-disabled.xml b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
new file mode 100644
index 000000000000..fdb3c36edc87
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value></value>
+    <description>
+      Optional. This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000000000000..8594494782c5
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      Optional. This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>
From 5289e59f1880c6a8fbe13331337c3e98856ec2ce Mon Sep 17 00:00:00 2001
From: Nick Dimiduk
Date: Thu, 14 Jan 2021 11:21:09 -0800
Subject: [PATCH 645/769] HBASE-25375 Provide a VM-based release environment
 (#2754)

This adds a Vagrantfile and supporting automation that creates a virtual
machine environment suitable for running the create-release scripting.

Signed-off-by: Duo Zhang
Signed-off-by: Michael Stack
---
 dev-support/release-vm/.gitignore             |   3 +
 dev-support/release-vm/README.md              | 141 ++++++++++++++++++
 dev-support/release-vm/Vagrantfile            |  50 +++++++
 dev-support/release-vm/provision/focal.sh     |  25 ++++
 .../release-vm/puppet/production/.gitignore   |   3 +
 .../production/.librarian/puppet/config       |  21 +++
 .../release-vm/puppet/production/Puppetfile   |  27 ++++
 .../puppet/production/data/nodes/rmvm.yaml    |  31 ++++
 .../puppet/production/environment.conf        |  20 +++
 .../release-vm/puppet/production/hiera.yaml   |  23 +++
 .../puppet/production/manifests/default.pp    |  44 ++++++
 11 files changed, 388 insertions(+)
 create mode 100644 dev-support/release-vm/.gitignore
 create mode 100644 dev-support/release-vm/README.md
 create mode 100644 dev-support/release-vm/Vagrantfile
 create mode 100755 dev-support/release-vm/provision/focal.sh
 create mode 100644 dev-support/release-vm/puppet/production/.gitignore
 create mode 100644 dev-support/release-vm/puppet/production/.librarian/puppet/config
 create mode 100644 dev-support/release-vm/puppet/production/Puppetfile
 create mode 100644 dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml
 create mode 100644 dev-support/release-vm/puppet/production/environment.conf
 create mode 100644 dev-support/release-vm/puppet/production/hiera.yaml
 create mode 100644 dev-support/release-vm/puppet/production/manifests/default.pp

diff --git a/dev-support/release-vm/.gitignore b/dev-support/release-vm/.gitignore
new file mode 100644
index 000000000000..49b3eb5bd9e3
--- /dev/null
+++ b/dev-support/release-vm/.gitignore
@@ -0,0 +1,3 @@
+.vagrant/
+*.log
+*.patch
diff --git a/dev-support/release-vm/README.md b/dev-support/release-vm/README.md
new file mode 100644
index 000000000000..74bb4392d2eb
--- /dev/null
+++ b/dev-support/release-vm/README.md
@@ -0,0 +1,141 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+# HBase Release Env
+
+This is a vagrant project that provides a virtual machine environment suitable
+for running an Apache HBase release.
+
+Requires:
+* [VirtualBox](http://virtualbox.org)
+* [Vagrant](http://vagrantup.com)
+* The private portion of your signing key available in the local GPG agent
+* The private portion of your Github authentication key available in either the local GPG agent or
+  local SSH agent
+
+## Usage
+
+Unlock the local keyring before proceeding (this should prompt you for your GPG passphrase). For
+example, assuming you have an authentication key configured in your keyring, this will do the
+trick.
+
+All terminal commands used below are assumed to be run with the current working directory as the
+location containing the `Vagrantfile`.
+
+The term "Host" is used to mean the environment that runs the Vagrant process. The term "Guest" is
+used to mean the virtual machine managed by the Host.
+
+### Ensure credentials work from the Host OS
+
+The ssh- and gpg-agent forwarding configuration used here assumes that your credentials work
+on the Host. Verify both are working before you proceed with the Guest. Additionally, using the
+credentials requires you to unlock the respective keyring, the state of which is persisted by the
+agent process or processes running on the Host.
+
+See instructions in [`create-release`](../create-release/README.txt) regarding proper
+configuration of ssh- and gpg-agents.
+
+Assuming the git repo origin is on GitHub, the following command will ensure that your ssh
+credentials are working. On the Host, run:
+
+```sh
+host:~$ ssh -T git@github.com
+Hi <username>! You've successfully authenticated, but GitHub does not provide shell access.
+```
+
+Likewise, ensure you have an encryption key that can be used to sign a file. Again, on the Host,
+run:
+
+```sh
+host:~$ gpg --detach --armor --sign Vagrantfile
+host:~$ gpg --verify Vagrantfile.asc
+gpg: assuming signed data in 'Vagrantfile'
+...
+host:~$ rm Vagrantfile.asc
+```
+
+### Make public keyring available to the VM
+
+Export the public portion of your signing credentials where the Guest can access it. Vagrant
+(+VirtualBox) shares the directory of the `Vagrantfile` with the Linux Guest via the `/vagrant`
+mount point. Any files present in this working directory on the Host are available to the Guest.
+
+From the Host, run:
+
+```sh
+host:~$ gpg --export <your-id>@apache.org > gpg.<your-id>.apache.pub
+```
+
+### Launch the Guest VM
+
+Launch the Guest VM by running
+
+```sh
+host:~$ vagrant up
+```
+
+If anything about the Vagrant or VirtualBox environment has changed since you last used this VM,
+it's best to `vagrant destroy -f` all local state and `vagrant up` a fresh instance.
+
+### Verify the Guest VM
+
+Connect to the Guest. This should forward your ssh- and gpg-agent session, as configured in the
+`Vagrantfile`.
+
+```sh
+host:~$ vagrant ssh
+```
+
+Now that you're in the Guest VM, be sure that all `gpg` commands you issue include the
+`--no-autostart` flag. This ensures that the `gpg` process in the Guest communicates with the
+agent running on the Host OS rather than launching its own process on the Guest OS.
+
+From the Guest, verify that ssh-agent forwarding is working, using the same test performed on the
+Host,
+
+```sh
+guest:~$ ssh -T git@github.com
+Hi <username>! You've successfully authenticated, but GitHub does not provide shell access.
+```
+
+From the Guest, import your exported public identity and verify the gpg-agent passthrough is
+working correctly.
+
+```sh
+guest:~$ gpg --no-autostart --import /vagrant/gpg.<your-id>.apache.pub
+...
+gpg: Total number processed: 1
+gpg: imported: 1
+guest:~$ gpg --no-autostart --detach --armor --sign repos/hbase/pom.xml
+guest:~$ gpg --no-autostart --verify repos/hbase/pom.xml.asc
+gpg: assuming signed data in 'repos/hbase/pom.xml'
+...
+guest:~$ rm repos/hbase/pom.xml.asc
+```
+
+### Build a Release Candidate
+
+Finally, you can initiate the release build.
Follow the instructions in +[`create-release`](../create-release/README.txt), i.e., + +```sh +guest:~$ mkdir ~/build-2.3.1-rc0 +guest:~$ cd repos/hbase +guest:~/repos/hbase$ ./dev-support/create-release/do-release-docker.sh -d ~/build-2.3.1-rc0/ ... +``` diff --git a/dev-support/release-vm/Vagrantfile b/dev-support/release-vm/Vagrantfile new file mode 100644 index 000000000000..e6a9a74b10ff --- /dev/null +++ b/dev-support/release-vm/Vagrantfile @@ -0,0 +1,50 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +Vagrant.configure("2") do |config| + + config.vm.define "rmvm" do |rmvm| + rmvm.vm.box = "ubuntu/focal64" + rmvm.vm.hostname = "rmvm" + + rmvm.vm.provision "shell", path: "provision/focal.sh", run: "once" + + rmvm.vm.provision "puppet", run: "always" do |puppet| + puppet.environment = "production" + puppet.environment_path = "puppet" + puppet.working_directory = "/tmp/vagrant-puppet" + puppet.options = "--test" + end + + rmvm.vm.provider "virtualbox" do |vb| + vb.name = "rmvm" + vb.cpus = 2 + vb.memory = (4 * 1024).to_s + end + end + + # pass through ssh-agent for github interaction + config.ssh.forward_agent = true + # pass through gpg-agent for artifact signing + config.ssh.extra_args = [ + "-R", "/run/user/1000/gnupg/S.gpg-agent:#{%x(gpgconf --list-dirs agent-extra-socket).strip}", + "-R", "/run/user/1000/gnupg/S.gpg-agent.extra:#{%x(gpgconf --list-dir agent-extra-socket).strip}", + ] +end diff --git a/dev-support/release-vm/provision/focal.sh b/dev-support/release-vm/provision/focal.sh new file mode 100755 index 000000000000..8dc30bc0a2ba --- /dev/null +++ b/dev-support/release-vm/provision/focal.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Bootstrap provisioner for a Ubuntu Bionic host. 
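+# Installs Puppet and librarian-puppet, then resolves the modules declared in the
+# Puppetfile so the puppet provisioner declared in the Vagrantfile can run.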
+# + +sudo apt-get update -qq +# puppet lets us manage the host, librarian-puppet lets us download puppet libraries +sudo apt-get install -y --no-install-recommends puppet librarian-puppet +cd /tmp/vagrant-puppet/environments/production && sudo librarian-puppet install --verbose diff --git a/dev-support/release-vm/puppet/production/.gitignore b/dev-support/release-vm/puppet/production/.gitignore new file mode 100644 index 000000000000..2df0470bce95 --- /dev/null +++ b/dev-support/release-vm/puppet/production/.gitignore @@ -0,0 +1,3 @@ +.tmp/ +modules/ +**/*.lock diff --git a/dev-support/release-vm/puppet/production/.librarian/puppet/config b/dev-support/release-vm/puppet/production/.librarian/puppet/config new file mode 100644 index 000000000000..738f292ed132 --- /dev/null +++ b/dev-support/release-vm/puppet/production/.librarian/puppet/config @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +LIBRARIAN_PUPPET_DESTRUCTIVE: 'false' +LIBRARIAN_PUPPET_USE_V1_API: '1' +LIBRARIAN_PUPPET_TMP: "/tmp/librarian_puppet/tmp" diff --git a/dev-support/release-vm/puppet/production/Puppetfile b/dev-support/release-vm/puppet/production/Puppetfile new file mode 100644 index 000000000000..3d5d5e44640e --- /dev/null +++ b/dev-support/release-vm/puppet/production/Puppetfile @@ -0,0 +1,27 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +forge "https://forgeapi.puppetlabs.com" + +mod 'puppet-packages', + :git => "https://github.com/greenaar/puppet-packages.git", + :ref => '8d6b8a85eea931e4cd045884d5786c1c1ff0df4c' +mod 'puppetlabs-docker', '3.10.1' +mod 'puppetlabs-stdlib', '5.2.0' +mod 'puppetlabs-vcsrepo', '3.1.0' +mod 'saz-ssh', '6.2.0' diff --git a/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml b/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml new file mode 100644 index 000000000000..44a66262e31a --- /dev/null +++ b/dev-support/release-vm/puppet/production/data/nodes/rmvm.yaml @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +classes: + - docker + - packages::manage + - ssh + +packages::latest: + - curl + - git + - gnupg + - gnupg-agent + +ssh::server_options: + StreamLocalBindUnlink: 'yes' diff --git a/dev-support/release-vm/puppet/production/environment.conf b/dev-support/release-vm/puppet/production/environment.conf new file mode 100644 index 000000000000..c6deb8dd9886 --- /dev/null +++ b/dev-support/release-vm/puppet/production/environment.conf @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +modulepath = modules:site:$basemodulepath +manifest = manifests diff --git a/dev-support/release-vm/puppet/production/hiera.yaml b/dev-support/release-vm/puppet/production/hiera.yaml new file mode 100644 index 000000000000..a8bb7c1c965f --- /dev/null +++ b/dev-support/release-vm/puppet/production/hiera.yaml @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +version: 5 + +hierarchy: + - name: "Per-node data" + path: "nodes/%{facts.hostname}.yaml" diff --git a/dev-support/release-vm/puppet/production/manifests/default.pp b/dev-support/release-vm/puppet/production/manifests/default.pp new file mode 100644 index 000000000000..e429d5af4bed --- /dev/null +++ b/dev-support/release-vm/puppet/production/manifests/default.pp @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +lookup('classes', Array[String], 'unique').include + +node rmvm { + $user = 'vagrant' + + # include the default `vagrant` user in the `docker` group + user { $user: + groups => ['docker'], + require => Package['docker'], + } + + # allow managing git repos in puppet + vcsrepo { "/home/${user}/repos/hbase": + ensure => latest, + branch => 'master', + group => $user, + owner => $user, + keep_local_changes => true, + provider => git, + remote => 'origin', + source => { + 'origin' => 'https://github.com/apache/hbase.git', + }, + depth => 1, + } +} From 2e8416b18cc1f2a7ed49445e0b0f165ee169ba5c Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Fri, 15 Jan 2021 13:45:08 +0800 Subject: [PATCH 646/769] HBASE-25439 Add BYTE unit in PrettyPrinter.Unit (addendum) (#2841) Signed-off-by: stack --- .../hadoop/hbase/client/TableDescriptorBuilder.java | 1 + .../hadoop/hbase/client/TestTableDescriptorBuilder.java | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index fd466654ea4e..d98386817148 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -251,6 +251,7 @@ public class TableDescriptorBuilder { public static PrettyPrinter.Unit getUnit(String key) { switch (key) { case MAX_FILESIZE: + case MEMSTORE_FLUSHSIZE: return PrettyPrinter.Unit.BYTE; default: return PrettyPrinter.Unit.NONE; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 425d59022ab0..43824afe8107 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -348,11 +348,15 @@ public void testStringCustomizedValues() throws HBaseException { "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", htd.toStringCustomizedValues()); - htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528").build(); + htd = TableDescriptorBuilder.newBuilder(htd) + .setMaxFileSize("10737942528") + .setMemStoreFlushSize("256MB") + .build(); assertEquals( "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " - + "MAX_FILESIZE => '10737942528 B (10GB 512KB)'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", + + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " + + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, {NAME => 'cf', BLOCKSIZE => '1000'}", htd.toStringCustomizedValues()); } From 77cf4ae668b856365320c8ec7b9ca3fea8c412c7 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Fri, 15 Jan 2021 14:00:50 +0800 Subject: [PATCH 647/769] HBASE-25431 MAX_FILESIZE and MEMSTORE_FLUSHSIZE should not be set negative number (#2803) Signed-off-by: stack --- .../hbase/util/TableDescriptorChecker.java | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index c69d38a8ec25..30c07b325a17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; @@ -82,10 +83,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check max file size long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit - long maxFileSize = td.getMaxFileSize(); - if (maxFileSize < 0) { - maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit); - } + // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in + // hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check + long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null ? 
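+      // Note: td.getMaxFileSize() returns -1 both when the value is unset and when it was
+      // explicitly set negative, so the raw String is consulted here; an explicitly negative
+      // value now falls through to the lower-limit check below instead of being silently
+      // replaced by the site default.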
+ conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : + Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) { String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + @@ -96,10 +98,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check flush size long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit - long flushSize = td.getMemStoreFlushSize(); - if (flushSize < 0) { - flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit); - } + // if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in + // hbase-site.xml, use flushSizeLowerLimit instead to skip this check + long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null ? + conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : + Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) { String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + flushSize + From bff775b13dbfda6aad9c1e2a98fba8d05c112443 Mon Sep 17 00:00:00 2001 From: Anjan Das Date: Fri, 15 Jan 2021 12:18:12 +0530 Subject: [PATCH 648/769] HBASE-25475: Improve UT added as part of HBASE-25445 in TestSplitWALManager (#2855) Signed-off-by: Wellington Chevreuil --- .../hbase/master/TestSplitWALManager.java | 101 ++++++------------ 1 file changed, 32 insertions(+), 69 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index 10eda749891d..40adbeaf9fe2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -31,14 +31,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure; -import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -98,58 +90,6 @@ public void teardown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void testWALArchiveWithDifferentWalAndRootFS() throws Exception{ - HBaseTestingUtility test_util_2 = new HBaseTestingUtility(); - Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); - test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); - CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir); - test_util_2.startMiniCluster(3); - HMaster master2 = test_util_2.getHBaseCluster().getMaster(); - LOG.info("The Master FS is pointing to: " + master2.getMasterFileSystem() - .getFileSystem().getUri()); - 
LOG.info("The WAL FS is pointing to: " + master2.getMasterFileSystem() - .getWALFileSystem().getUri()); - Table table = test_util_2.createTable(TABLE_NAME, FAMILY); - test_util_2.waitTableAvailable(TABLE_NAME); - Admin admin = test_util_2.getAdmin(); - MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster() - .getMasterProcedureExecutor().getEnvironment(); - final ProcedureExecutor executor = test_util_2.getMiniHBaseCluster() - .getMaster().getMasterProcedureExecutor(); - List regionInfos = admin.getRegions(TABLE_NAME); - SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure( - env, regionInfos.get(0), Bytes.toBytes("row5")); - // Populate some rows in the table - LOG.info("Beginning put data to the table: " + TABLE_NAME.toString()); - int rowCount = 5; - for (int i = 0; i < rowCount; i++) { - byte[] row = Bytes.toBytes("row" + i); - Put put = new Put(row); - put.addColumn(FAMILY, FAMILY, FAMILY); - table.put(put); - } - executor.submitProcedure(splitProcedure); - LOG.info("Submitted SplitProcedure."); - test_util_2.waitFor(30000, () -> executor.getProcedures().stream() - .filter(p -> p instanceof TransitRegionStateProcedure) - .map(p -> (TransitRegionStateProcedure) p) - .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); - test_util_2.getMiniHBaseCluster().killRegionServer( - test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName()); - test_util_2.getMiniHBaseCluster().startRegionServer(); - test_util_2.waitUntilNoRegionsInTransition(); - Scan scan = new Scan(); - ResultScanner results = table.getScanner(scan); - int scanRowCount = 0; - while (results.next() != null) { - scanRowCount++; - } - Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount + - " were expected.", rowCount, scanRowCount); - test_util_2.shutdownMiniCluster(); - } - @Test public void testAcquireAndRelease() throws Exception { List testProcedures = new ArrayList<>(); @@ -272,16 +212,22 @@ public void testGetWALsToSplit() throws Exception { Assert.assertEquals(0, metaWals.size()); } - @Test - public void testSplitLogs() throws Exception { - TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE); + private void splitLogsTestHelper(HBaseTestingUtility testUtil) throws Exception { + HMaster hmaster = testUtil.getHBaseCluster().getMaster(); + SplitWALManager splitWALManager = hmaster.getSplitWALManager(); + LOG.info("The Master FS is pointing to: " + hmaster.getMasterFileSystem() + .getFileSystem().getUri()); + LOG.info("The WAL FS is pointing to: " + hmaster.getMasterFileSystem() + .getWALFileSystem().getUri()); + + testUtil.createTable(TABLE_NAME, FAMILY, testUtil.KEYS_FOR_HBA_CREATE_TABLE); // load table - TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY); - ProcedureExecutor masterPE = master.getMasterProcedureExecutor(); - ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta(); - ServerName testServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream() - .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny() - .get(); + testUtil.loadTable(testUtil.getConnection().getTable(TABLE_NAME), FAMILY); + ProcedureExecutor masterPE = hmaster.getMasterProcedureExecutor(); + ServerName metaServer = testUtil.getHBaseCluster().getServerHoldingMeta(); + ServerName testServer = testUtil.getHBaseCluster().getRegionServerThreads().stream() + .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny() + .get(); List procedures = 
splitWALManager.splitWALs(testServer, false); Assert.assertEquals(1, procedures.size()); ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0)); @@ -294,6 +240,23 @@ public void testSplitLogs() throws Exception { Assert.assertEquals(1, splitWALManager.getWALsToSplit(metaServer, false).size()); } + @Test + public void testSplitLogs() throws Exception { + splitLogsTestHelper(TEST_UTIL); + } + + @Test + public void testSplitLogsWithDifferentWalAndRootFS() throws Exception{ + HBaseTestingUtility testUtil2 = new HBaseTestingUtility(); + testUtil2.getConfiguration().setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 1); + Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); + testUtil2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); + CommonFSUtils.setWALRootDir(testUtil2.getConfiguration(), dir); + testUtil2.startMiniCluster(3); + splitLogsTestHelper(testUtil2); + testUtil2.shutdownMiniCluster(); + } + @Test public void testWorkerReloadWhenMasterRestart() throws Exception { List testProcedures = new ArrayList<>(); From c660e8ae2e1a0b13547c319514d0841f97ac84b9 Mon Sep 17 00:00:00 2001 From: Christine Feng Date: Sat, 16 Jan 2021 00:03:13 -0800 Subject: [PATCH 649/769] HBASE-25478 - Implement retries when enabling tables in TestRegionReplicaReplicationEndpoint (#2866) Signed-off-by: stack Signed-off-by: Viraj Jasani --- .../TestRegionReplicaReplicationEndpoint.java | 47 ++++++++++++++----- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java index 54560582cd35..62989d3bf747 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.List; import java.util.UUID; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell.Type; @@ -54,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; +import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -74,6 +76,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Uninterruptibles; /** * Tests RegionReplicaReplicationEndpoint class by setting up region replicas and verifying @@ -145,7 +148,7 @@ public void testRegionReplicaReplicationPeerIsCreated() throws IOException { .createTableDescriptor(TableName.valueOf("testReplicationPeerIsCreated_no_region_replicas"), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); try { peerConfig = admin.getReplicationPeerConfig(peerId); fail("Should throw ReplicationException, because replication peer id=" + 
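     // splitMeta=false here: hbase:meta WALs are excluded, so exactly one
     // SplitWALProcedure is expected regardless of which filesystem layout
     // the calling test configured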
peerId @@ -157,7 +160,7 @@ public void testRegionReplicaReplicationPeerIsCreated() throws IOException { htd = HTU.createModifyableTableDescriptor(TableName.valueOf("testReplicationPeerIsCreated"), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED).setRegionReplication(2).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); // assert peer configuration is correct peerConfig = admin.getReplicationPeerConfig(peerId); @@ -193,7 +196,7 @@ public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exc TableName.valueOf("testRegionReplicaReplicationPeerIsCreatedForModifyTable"), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); // assert that replication peer is not created yet try { @@ -207,7 +210,7 @@ public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exc HTU.getAdmin().disableTable(htd.getTableName()); htd = TableDescriptorBuilder.newBuilder(htd).setRegionReplication(2).build(); HTU.getAdmin().modifyTable(htd); - HTU.getAdmin().enableTable(htd.getTableName()); + createOrEnableTableWithRetries(htd, false); // assert peer configuration is correct peerConfig = admin.getReplicationPeerConfig(peerId); @@ -229,7 +232,7 @@ public void testRegionReplicaReplication(int regionReplication) throws Exception ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) .setRegionReplication(regionReplication).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); TableName tableNameNoReplicas = TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS"); HTU.deleteTableIfAny(tableNameNoReplicas); @@ -318,7 +321,7 @@ public void testRegionReplicaWithoutMemstoreReplication() throws Exception { int regionReplication = 3; TableDescriptor htd = HTU.createModifyableTableDescriptor(name.getMethodName()) .setRegionReplication(regionReplication).setRegionMemStoreReplication(false).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); final TableName tableName = htd.getTableName(); Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); Table table = connection.getTable(tableName); @@ -352,7 +355,7 @@ public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception int regionReplication = 3; TableDescriptor htd = HTU.createModifyableTableDescriptor(name.getMethodName()) .setRegionReplication(regionReplication).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); final TableName tableName = htd.getTableName(); Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); @@ -404,7 +407,7 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa final TableName tableName = htd.getTableName(); HTU.deleteTableIfAny(tableName); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); TableName toBeDisabledTable = TableName.valueOf( dropTable ? "droppedTable" : (disableReplication ? 
"disableReplication" : "disabledTable")); HTU.deleteTableIfAny(toBeDisabledTable); @@ -413,7 +416,7 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) .setRegionReplication(regionReplication).build(); - HTU.getAdmin().createTable(htd); + createOrEnableTableWithRetries(htd, true); // both tables are created, now pause replication HTU.getAdmin().disableReplicationPeer(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_PEER); @@ -443,7 +446,7 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa htd = TableDescriptorBuilder.newBuilder(htd).setRegionReplication(regionReplication - 2).build(); HTU.getAdmin().modifyTable(htd); - HTU.getAdmin().enableTable(toBeDisabledTable); + createOrEnableTableWithRetries(htd, false); } HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(0); @@ -467,7 +470,7 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table htd = TableDescriptorBuilder.newBuilder(htd).setRegionReplication(regionReplication).build(); HTU.getAdmin().modifyTable(htd); - HTU.getAdmin().enableTable(toBeDisabledTable); + createOrEnableTableWithRetries(htd, false); } try { @@ -487,4 +490,26 @@ private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disa connection.close(); } } + + private void createOrEnableTableWithRetries(TableDescriptor htd, boolean createTableOperation) { + // Helper function to run create/enable table operations with a retry feature + boolean continueToRetry = true; + int tries = 0; + while (continueToRetry && tries < 50) { + try { + continueToRetry = false; + if (createTableOperation) { + HTU.getAdmin().createTable(htd); + } else { + HTU.getAdmin().enableTable(htd.getTableName()); + } + } catch (IOException e) { + if (e.getCause() instanceof ReplicationException) { + continueToRetry = true; + tries++; + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + } + } + } + } } From 0aa121f1e1fe41d7f6bf4b3e3dcbafbecea47c2c Mon Sep 17 00:00:00 2001 From: Anjan Das Date: Sun, 17 Jan 2021 16:31:07 +0530 Subject: [PATCH 650/769] HBASE-25475 : Unset zk based wal splitting explicitly in tests (ADDENDUM) (#2891) Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/master/TestSplitWALManager.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index 40adbeaf9fe2..e1f318869bab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -248,6 +248,7 @@ public void testSplitLogs() throws Exception { @Test public void testSplitLogsWithDifferentWalAndRootFS() throws Exception{ HBaseTestingUtility testUtil2 = new HBaseTestingUtility(); + testUtil2.getConfiguration().setBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false); testUtil2.getConfiguration().setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 1); Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); testUtil2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); From beff1cc74bac788e3345d03fafad5e3c423315f9 Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Mon, 18 Jan 2021 18:57:02 +0800 
Subject: [PATCH 651/769] =?UTF-8?q?HBASE-25497=20move=5Fnamespaces=5Frsgro?= =?UTF-8?q?up=20should=20change=20hbase.rsgroup.name=20=E2=80=A6=20(#2875)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * HBASE-25497 move_namespaces_rsgroup should change hbase.rsgroup.name config in NamespaceDescriptor Signed-off-by: Zheng Wang --- .../main/ruby/shell/commands/move_namespaces_rsgroup.rb | 4 ++++ hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/hbase-shell/src/main/ruby/shell/commands/move_namespaces_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/move_namespaces_rsgroup.rb index 7878eb9317ca..60b0bff62c28 100644 --- a/hbase-shell/src/main/ruby/shell/commands/move_namespaces_rsgroup.rb +++ b/hbase-shell/src/main/ruby/shell/commands/move_namespaces_rsgroup.rb @@ -31,6 +31,10 @@ def help def command(dest, namespaces) rsgroup_admin.move_namespaces(dest, namespaces) + namespaces.each do |ns| + arg = {'METHOD' => 'set', 'hbase.rsgroup.name' => dest} + admin.alter_namespace(ns, arg) + end end end end diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb index eec92b30ba0b..f93d36422491 100644 --- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb +++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb @@ -114,8 +114,16 @@ def remove_rsgroup(group_name) assert_not_nil(group) assert_true(@admin.listTablesInRSGroup(group_name).contains(org.apache.hadoop.hbase.TableName.valueOf(ns_table_name))) + ns_table_name2 = 'test_namespace:test_ns_table2' + @shell.command(:create, ns_table_name2, 'f') + + assert_true(@admin.listTablesInRSGroup(group_name).contains(org.apache.hadoop.hbase.TableName.valueOf(ns_table_name2))) + assert_equal(2, @admin.listTablesInRSGroup(group_name).count) + @shell.command(:disable, ns_table_name) @shell.command(:drop, ns_table_name) + @shell.command(:disable, ns_table_name2) + @shell.command(:drop, ns_table_name2) @shell.command(:drop_namespace, namespace_name) remove_rsgroup(group_name) end From d642d742e98c78e0a21fa1478e5fd3e60d022ba1 Mon Sep 17 00:00:00 2001 From: xijiawen <15891721997@163.com> Date: Tue, 19 Jan 2021 17:21:01 +0800 Subject: [PATCH 652/769] HBASE-25496 add get_namespace_rsgroup command (#2874) Signed-off-by: Viraj Jasani --- hbase-shell/src/main/ruby/hbase/admin.rb | 10 +++++ hbase-shell/src/main/ruby/shell.rb | 1 + .../shell/commands/get_namespace_rsgroup.rb | 41 +++++++++++++++++++ 3 files changed, 52 insertions(+) create mode 100644 hbase-shell/src/main/ruby/shell/commands/get_namespace_rsgroup.rb diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index d874d6337b84..4e3b0de16a04 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1447,6 +1447,16 @@ def alter_namespace(namespace_name, *args) @admin.modifyNamespace(nsb.build) end + #---------------------------------------------------------------------------------------------- + # Get namespace's rsgroup + def get_namespace_rsgroup(namespace_name) + # Fail if namespace name is not a string + raise(ArgumentError, 'Namespace name must be of type String') unless namespace_name.is_a?(String) + nsd = @admin.getNamespaceDescriptor(namespace_name) + raise(ArgumentError, 'Namespace does not exist') unless nsd + nsd.getConfigurationValue("hbase.rsgroup.name") + end + 
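+    # Companion read path to move_namespaces_rsgroup (previous patch), which now records the
+    # target group in the namespace configuration under 'hbase.rsgroup.name'; the method below
+    # simply reads it back, e.g. from the shell: hbase> get_namespace_rsgroup 'test_namespace'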
#---------------------------------------------------------------------------------------------- # Drops a table def drop_namespace(namespace_name) diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index ba069146ef31..549e31d1dd65 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -628,5 +628,6 @@ def self.exception_handler(hide_traceback) rename_rsgroup alter_rsgroup_config show_rsgroup_config + get_namespace_rsgroup ] ) diff --git a/hbase-shell/src/main/ruby/shell/commands/get_namespace_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_namespace_rsgroup.rb new file mode 100644 index 000000000000..a4991d16943b --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_namespace_rsgroup.rb @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetNamespaceRsgroup < Command + def help + <<-EOF +Get the group name the given NameSpace is a member of. + +Example: + + hbase> get_namespace_rsgroup 'namespace_name' + +EOF + end + + def command(namespace_name) + group_name = admin.get_namespace_rsgroup(namespace_name) + unless group_name.nil? + formatter.row([group_name]) + end + formatter.footer(1) + end + end + end +end \ No newline at end of file From a0fcbd3c6ca7c13f4ffe1ebb54691efb0c011e8a Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Tue, 19 Jan 2021 23:53:51 +0800 Subject: [PATCH 653/769] HBASE-25513 When the table is turned on normalize, the first region may not be merged even the size is 0 (#2887) Signed-off-by: Nick Dimiduk --- .../normalizer/SimpleRegionNormalizer.java | 4 +++ .../TestSimpleRegionNormalizer.java | 32 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 6d7387b7f11b..08e529cd01a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -364,6 +364,10 @@ private List computeMergeNormalizationPlans(final NormalizeCo if (rangeMembers.isEmpty() // when there are no range members, seed the range with whatever // we have. this way we're prepared in case the next region is // 0-size. + || (rangeMembers.size() == 1 && sumRangeMembersSizeMb == 0) // when there is only one + // region and the size is 0, + // seed the range with + // whatever we have. || regionSizeMb == 0 // always add an empty region to the current range. 
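+          // e.g. sizes [0, 10, ...] with a small average: the empty first region seeds the
+          // range with sum 0, and without the size()==1 && sum==0 clause above the 10 MB
+          // neighbour would fail the average check below, leaving the leading empty region
+          // unmerged (the HBASE-25513 case)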
|| (regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb)) { // add the current region // to the range when diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index 33b32972542e..70f5a87ac42a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -448,6 +448,38 @@ public void testMergeEmptyRegions1() { .build())); } + @Test + public void testMergeEmptyRegions2() { + conf.setBoolean(SPLIT_ENABLED_KEY, false); + conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); + final TableName tableName = name.getTableName(); + final List regionInfos = createRegionInfos(tableName, 8); + final Map regionSizes = + createRegionSizesMap(regionInfos, 0, 10, 1, 0, 9, 0, 10, 0); + setupMocksForNormalizer(regionSizes, regionInfos); + + assertFalse(normalizer.isSplitEnabled()); + assertEquals(0, normalizer.getMergeMinRegionSizeMb()); + List plans = normalizer.computePlansForTable(tableName); + assertThat(plans, contains( + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 10) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(2), 1) + .addTarget(regionInfos.get(3), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(4), 9) + .addTarget(regionInfos.get(5), 0) + .build(), + new MergeNormalizationPlan.Builder() + .addTarget(regionInfos.get(6), 10) + .addTarget(regionInfos.get(7), 0) + .build())); + } + @Test public void testSplitAndMultiMerge() { conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0); From b116686fbc821ef3c2856549c1edd2c7aa126f4c Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Wed, 20 Jan 2021 03:09:50 +0800 Subject: [PATCH 654/769] HBASE-25482 Improve SimpleRegionNormalizer#getAverageRegionSizeMb (#2858) Signed-off-by: Nick Dimiduk --- .../normalizer/SimpleRegionNormalizer.java | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 08e529cd01a4..1675e049d77d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -257,16 +257,13 @@ private double getAverageRegionSizeMb(final List tableRegions) { throw new IllegalStateException( "Cannot calculate average size of a table without any regions."); } - final int regionCount = tableRegions.size(); - final long totalSizeMb = tableRegions.stream() - .mapToLong(this::getRegionSizeMB) - .sum(); TableName table = tableRegions.get(0).getTable(); int targetRegionCount = -1; long targetRegionSize = -1; + double avgRegionSize; try { TableDescriptor tableDescriptor = masterServices.getTableDescriptors().get(table); - if (tableDescriptor != null && LOG.isDebugEnabled()) { + if (tableDescriptor != null) { targetRegionCount = tableDescriptor.getNormalizerTargetRegionCount(); targetRegionSize = tableDescriptor.getNormalizerTargetRegionSize(); LOG.debug("Table {} configured with target region count {}, target region size {}", table, @@ -276,18 
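+      // eight regions totalling 30 MB, so the average is 3.75 MB; each empty region is
+      // expected to pair with its non-empty neighbour in the plans asserted below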
+273,22 @@ private double getAverageRegionSizeMb(final List tableRegions) { LOG.warn("TableDescriptor for {} unavailable, table-level target region count and size" + " configurations cannot be considered.", table, e); } - - double avgRegionSize; if (targetRegionSize > 0) { avgRegionSize = targetRegionSize; - } else if (targetRegionCount > 0) { - avgRegionSize = totalSizeMb / (double) targetRegionCount; } else { - avgRegionSize = totalSizeMb / (double) regionCount; + final int regionCount = tableRegions.size(); + final long totalSizeMb = tableRegions.stream() + .mapToLong(this::getRegionSizeMB) + .sum(); + if (targetRegionCount > 0) { + avgRegionSize = totalSizeMb / (double) targetRegionCount; + } else { + avgRegionSize = totalSizeMb / (double) regionCount; + } + LOG.debug("Table {}, total aggregated regions size: {} and average region size {}", table, + totalSizeMb, avgRegionSize); } - LOG.debug("Table {}, total aggregated regions size: {} and average region size {}", table, - totalSizeMb, avgRegionSize); return avgRegionSize; } From 8bc05cf6fe4818375a6578c4f0189222f2972168 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Wed, 20 Jan 2021 16:10:36 +0800 Subject: [PATCH 655/769] =?UTF-8?q?HBASE-25509=20ChoreService.cancelChore?= =?UTF-8?q?=20will=20not=20call=20ScheduledChore.cle=E2=80=A6=20(#2890)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/ChoreService.java | 135 ++- .../apache/hadoop/hbase/ScheduledChore.java | 103 +- .../apache/hadoop/hbase/TestChoreService.java | 1076 ++++++++--------- .../apache/hadoop/hbase/master/HMaster.java | 40 +- .../hbase/master/RegionsRecoveryChore.java | 22 - .../master/RegionsRecoveryConfigManager.java | 45 +- .../hadoop/hbase/master/ServerManager.java | 2 +- .../hadoop/hbase/master/SplitLogManager.java | 2 +- .../hadoop/hbase/quotas/QuotaCache.java | 2 +- .../quotas/RegionServerSpaceQuotaManager.java | 4 +- .../hbase/regionserver/HRegionServer.java | 24 +- .../hbase/regionserver/HeapMemoryManager.java | 2 +- .../TestRegionsRecoveryConfigManager.java | 58 +- .../master/janitor/TestCatalogJanitor.java | 2 +- 14 files changed, 718 insertions(+), 799 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java index 39c3ccc69199..5bd67ad02eec 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase; +import com.google.errorprone.annotations.RestrictedApi; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; @@ -26,8 +27,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.hadoop.hbase.ScheduledChore.ChoreServicer; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +51,7 @@ * Calling this method ensures that all scheduled chores are cancelled and cleaned up properly. 
*/ @InterfaceAudience.Public -public class ChoreService implements ChoreServicer { +public class ChoreService { private static final Logger LOG = LoggerFactory.getLogger(ChoreService.class); /** @@ -141,28 +140,39 @@ public ChoreService(final String coreThreadPoolPrefix, int corePoolSize, boolean * @return true when the chore was successfully scheduled. false when the scheduling failed * (typically occurs when a chore is scheduled during shutdown of service) */ - public synchronized boolean scheduleChore(ScheduledChore chore) { + public boolean scheduleChore(ScheduledChore chore) { if (chore == null) { return false; } - - try { - if (chore.getPeriod() <= 0) { - LOG.info("Chore {} is disabled because its period is not positive.", chore); - return false; - } - LOG.info("Chore {} is enabled.", chore); - chore.setChoreServicer(this); - ScheduledFuture future = - scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), chore.getPeriod(), - chore.getTimeUnit()); - scheduledChores.put(chore, future); - return true; - } catch (Exception exception) { - if (LOG.isInfoEnabled()) { - LOG.info("Could not successfully schedule chore: " + chore.getName()); + // always lock chore first to prevent dead lock + synchronized (chore) { + synchronized (this) { + try { + // Chores should only ever be scheduled with a single ChoreService. If the choreService + // is changing, cancel any existing schedules of this chore. + if (chore.getChoreService() == this) { + LOG.warn("Chore {} has already been scheduled with us", chore); + return false; + } + if (chore.getPeriod() <= 0) { + LOG.info("Chore {} is disabled because its period is not positive.", chore); + return false; + } + LOG.info("Chore {} is enabled.", chore); + if (chore.getChoreService() != null) { + LOG.info("Cancel chore {} from its previous service", chore); + chore.getChoreService().cancelChore(chore); + } + chore.setChoreService(this); + ScheduledFuture future = scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), + chore.getPeriod(), chore.getTimeUnit()); + scheduledChores.put(chore, future); + return true; + } catch (Exception e) { + LOG.error("Could not successfully schedule chore: {}", chore.getName(), e); + return false; + } } - return false; } } @@ -175,19 +185,35 @@ private void rescheduleChore(ScheduledChore chore) { ScheduledFuture future = scheduledChores.get(chore); future.cancel(false); } - scheduleChore(chore); + ScheduledFuture future = scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), + chore.getPeriod(), chore.getTimeUnit()); + scheduledChores.put(chore, future); } - @InterfaceAudience.Private - @Override - public synchronized void cancelChore(ScheduledChore chore) { + /** + * Cancel any ongoing schedules that this chore has with the implementer of this interface. + *
<p/>
-  @InterfaceAudience.Private
-  @Override
-  public synchronized void cancelChore(ScheduledChore chore) {
+  /**
+   * Cancel any ongoing schedules that this chore has with this ChoreService.
+   *
+   * Call {@link ScheduledChore#cancel()} to cancel a {@link ScheduledChore}; the
+   * {@link ScheduledChore#cancel()} method calls back into this method to remove the
+   * {@link ScheduledChore} from this {@link ChoreService}.
+   */
+  @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "",
+      allowedOnPath = ".*/org/apache/hadoop/hbase/(ScheduledChore|ChoreService).java")
+  synchronized void cancelChore(ScheduledChore chore) {
     cancelChore(chore, true);
   }

-  @InterfaceAudience.Private
-  @Override
-  public synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning) {
-    if (chore != null && scheduledChores.containsKey(chore)) {
+  /**
+   * Cancel any ongoing schedules that this chore has with this ChoreService.
+   *
+   * Call {@link ScheduledChore#cancel(boolean)} to cancel a {@link ScheduledChore}; the
+   * {@link ScheduledChore#cancel(boolean)} method calls back into this method to remove the
+   * {@link ScheduledChore} from this {@link ChoreService}.
+   */
+  @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "",
+      allowedOnPath = ".*/org/apache/hadoop/hbase/(ScheduledChore|ChoreService).java")
+  synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning) {
+    if (scheduledChores.containsKey(chore)) {
       ScheduledFuture<?> future = scheduledChores.get(chore);
       future.cancel(mayInterruptIfRunning);
       scheduledChores.remove(chore);
@@ -201,21 +227,24 @@ public synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptI
     }
   }

+  /**
+   * @return true when the chore is scheduled with this ChoreService
+   */
   @InterfaceAudience.Private
-  @Override
   public synchronized boolean isChoreScheduled(ScheduledChore chore) {
     return chore != null && scheduledChores.containsKey(chore)
       && !scheduledChores.get(chore).isDone();
   }

-  @InterfaceAudience.Private
-  @Override
-  public synchronized boolean triggerNow(ScheduledChore chore) {
-    if (chore != null) {
-      rescheduleChore(chore);
-      return true;
-    }
-    return false;
+  /**
+   * This method tries to execute the chore immediately. If the chore is executing at the time of
+   * this call, the chore will begin another execution as soon as the current execution finishes.
+   */
+  @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "",
+      allowedOnPath = ".*/org/apache/hadoop/hbase/ScheduledChore.java")
+  synchronized void triggerNow(ScheduledChore chore) {
+    assert chore.getChoreService() == this;
+    rescheduleChore(chore);
   }

   /**
@@ -295,10 +324,20 @@ private synchronized void requestCorePoolDecrease() {
     }
   }

-  @InterfaceAudience.Private
-  @Override
-  public synchronized void onChoreMissedStartTime(ScheduledChore chore) {
-    if (chore == null || !scheduledChores.containsKey(chore)) return;
+  /**
+   * A callback that tells this ChoreService that one of its scheduled chores is missing its start
+   * time. The implication of a chore missing its start time is that the service's current means of
+   * scheduling may not be sufficient to handle the number of ongoing chores (the other explanation
+   * is that the chore's execution time is greater than its scheduled period). The service should
+   * try to increase its concurrency when this callback is received.
+   * @param chore The chore that missed its start time
+   */
+  @RestrictedApi(explanation = "Should only be called in ScheduledChore", link = "",
+      allowedOnPath = ".*/org/apache/hadoop/hbase/ScheduledChore.java")
+  synchronized void onChoreMissedStartTime(ScheduledChore chore) {
+    if (!scheduledChores.containsKey(chore)) {
+      return;
+    }

     // If the chore has not caused an increase in the size of the core thread pool then request an
     // increase. This allows each chore missing its start time to increase the core pool size by
@@ -319,13 +358,17 @@ public synchronized void onChoreMissedStartTime(ScheduledChore chore) {
    * shutdown the service. Any chores that are scheduled for execution will be cancelled. Any chores
    * in the middle of execution will be interrupted and shutdown. This service will be unusable
    * after this method has been called (i.e. future scheduling attempts will fail).
+   *
+   * Notice that this only removes the chores from this ChoreService; you can still schedule them
+   * with another ChoreService.
    */
   public synchronized void shutdown() {
-    scheduler.shutdownNow();
-    if (LOG.isInfoEnabled()) {
-      LOG.info("Chore service for: " + coreThreadPoolPrefix + " had " + scheduledChores.keySet()
-        + " on shutdown");
+    if (isShutdown()) {
+      return;
     }
+    scheduler.shutdownNow();
+    LOG.info("Chore service for: {} had {} on shutdown", coreThreadPoolPrefix,
+      scheduledChores.keySet());
     cancelAllChores(true);
     scheduledChores.clear();
     choresMissingStartTime.clear();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
index 1fb5b7e9e340..6155bbdeb3b0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase;

+import com.google.errorprone.annotations.RestrictedApi;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -33,7 +34,7 @@
  * execute within the defined period. It is bad practice to define a ScheduledChore whose execution
  * time exceeds its period since it will try to hog one of the threads in the {@link ChoreService}'s
  * thread pool.
- *

    + *

    * Don't subclass ScheduledChore if the task relies on being woken up for something to do, such as * an entry being added to a queue, etc. */ @@ -60,7 +61,7 @@ public abstract class ScheduledChore implements Runnable { * Interface to the ChoreService that this ScheduledChore is scheduled with. null if the chore is * not scheduled. */ - private ChoreServicer choreServicer; + private ChoreService choreService; /** * Variables that encapsulate the meaningful state information @@ -77,39 +78,6 @@ public abstract class ScheduledChore implements Runnable { */ private final Stoppable stopper; - interface ChoreServicer { - /** - * Cancel any ongoing schedules that this chore has with the implementer of this interface. - */ - public void cancelChore(ScheduledChore chore); - public void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning); - - /** - * @return true when the chore is scheduled with the implementer of this interface - */ - public boolean isChoreScheduled(ScheduledChore chore); - - /** - * This method tries to execute the chore immediately. If the chore is executing at the time of - * this call, the chore will begin another execution as soon as the current execution finishes - *

    - * If the chore is not scheduled with a ChoreService, this call will fail. - * @return false when the chore could not be triggered immediately - */ - public boolean triggerNow(ScheduledChore chore); - - /** - * A callback that tells the implementer of this interface that one of the scheduled chores is - * missing its start time. The implication of a chore missing its start time is that the - * service's current means of scheduling may not be sufficient to handle the number of ongoing - * chores (the other explanation is that the chore's execution time is greater than its - * scheduled period). The service should try to increase its concurrency when this callback is - * received. - * @param chore The chore that missed its start time - */ - public void onChoreMissedStartTime(ScheduledChore chore); - } - /** * This constructor is for test only. It allows us to create an object and to call chore() on it. */ @@ -168,8 +136,8 @@ public void run() { onChoreMissedStartTime(); LOG.info("Chore: {} missed its start time", getName()); } else if (stopper.isStopped() || !isScheduled()) { - cancel(false); - cleanup(); + // call shutdown here to cleanup the ScheduledChore. + shutdown(false); LOG.info("Chore: {} was stopped", getName()); } else { try { @@ -193,7 +161,6 @@ public void run() { LOG.error("Caught error", t); if (this.stopper.isStopped()) { cancel(false); - cleanup(); } } } @@ -214,7 +181,9 @@ private synchronized void updateTimeTrackingBeforeRun() { * pool threads */ private synchronized void onChoreMissedStartTime() { - if (choreServicer != null) choreServicer.onChoreMissedStartTime(this); + if (choreService != null) { + choreService.onChoreMissedStartTime(this); + } } /** @@ -253,20 +222,17 @@ private synchronized boolean isValidTime(final long time) { * @return false when the Chore is not currently scheduled with a ChoreService */ public synchronized boolean triggerNow() { - if (choreServicer != null) { - return choreServicer.triggerNow(this); - } else { + if (choreService == null) { return false; } + choreService.triggerNow(this); + return true; } - synchronized void setChoreServicer(ChoreServicer service) { - // Chores should only ever be scheduled with a single ChoreService. If the choreServicer - // is changing, cancel any existing schedules of this chore. 
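With the ChoreServicer indirection gone, the cancel-any-previous-schedule step that used to live in setChoreServicer (removed just below) now happens inside ChoreService.scheduleChore itself, so moving a chore between services is a single call. An illustrative sketch of the resulting behavior, reusing the DoNothingChore helper from the tests further down (it mirrors testChangingChoreServices, not new functionality):

    // Illustrative only: scheduling an already-scheduled chore on a second
    // service first cancels its schedule on the first one.
    ChoreService first = new ChoreService("first");
    ChoreService second = new ChoreService("second");
    ScheduledChore chore = new DoNothingChore("sample", 100);
    first.scheduleChore(chore);  // chore.getChoreService() == first
    second.scheduleChore(chore); // cancels on 'first', then schedules on 'second'
    first.shutdown();
    second.shutdown();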
- if (choreServicer != null && choreServicer != service) { - choreServicer.cancelChore(this, false); - } - choreServicer = service; + @RestrictedApi(explanation = "Should only be called in ChoreService", link = "", + allowedOnPath = ".*/org/apache/hadoop/hbase/ChoreService.java") + synchronized void setChoreService(ChoreService service) { + choreService = service; timeOfThisRun = -1; } @@ -275,9 +241,10 @@ public synchronized void cancel() { } public synchronized void cancel(boolean mayInterruptIfRunning) { - if (isScheduled()) choreServicer.cancelChore(this, mayInterruptIfRunning); - - choreServicer = null; + if (isScheduled()) { + choreService.cancelChore(this, mayInterruptIfRunning); + } + choreService = null; } public String getName() { @@ -310,17 +277,14 @@ public synchronized boolean isInitialChoreComplete() { return initialChoreComplete; } - @InterfaceAudience.Private - synchronized ChoreServicer getChoreServicer() { - return choreServicer; + synchronized ChoreService getChoreService() { + return choreService; } - @InterfaceAudience.Private synchronized long getTimeOfLastRun() { return timeOfLastRun; } - @InterfaceAudience.Private synchronized long getTimeOfThisRun() { return timeOfThisRun; } @@ -329,10 +293,12 @@ synchronized long getTimeOfThisRun() { * @return true when this Chore is scheduled with a ChoreService */ public synchronized boolean isScheduled() { - return choreServicer != null && choreServicer.isChoreScheduled(this); + return choreService != null && choreService.isChoreScheduled(this); } @InterfaceAudience.Private + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") public synchronized void choreForTesting() { chore(); } @@ -354,7 +320,26 @@ protected boolean initialChore() { /** * Override to run cleanup tasks when the Chore encounters an error and must stop running */ - protected synchronized void cleanup() { + protected void cleanup() { + } + + /** + * Call {@link #shutdown(boolean)} with {@code true}. + * @see ScheduledChore#shutdown(boolean) + */ + public synchronized void shutdown() { + shutdown(true); + } + + /** + * Completely shutdown the ScheduleChore, which means we will call cleanup and you should not + * schedule it again. + *

    + * This is another path to cleanup the chore, comparing to stop the stopper instance passed in. + */ + public synchronized void shutdown(boolean mayInterruptIfRunning) { + cancel(mayInterruptIfRunning); + cleanup(); } /** diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java index 69a171c205f7..64a076a60633 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java @@ -20,16 +20,18 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.CountingChore; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.DoNothingChore; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.FailInitialChore; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.SampleStopper; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.SleepingChore; -import org.apache.hadoop.hbase.TestChoreService.ScheduledChoreSamples.SlowChore; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Threads; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -38,261 +40,234 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category(MediumTests.class) +@Category({ MiscTests.class, MediumTests.class }) public class TestChoreService { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestChoreService.class); + HBaseClassTestRule.forClass(TestChoreService.class); - public static final Logger log = LoggerFactory.getLogger(TestChoreService.class); + private static final Logger LOG = LoggerFactory.getLogger(TestChoreService.class); + + private static final Configuration CONF = HBaseConfiguration.create(); @Rule public TestName name = new TestName(); + private int initialCorePoolSize = 3; + + private ChoreService service; + + @Before + public void setUp() { + service = new ChoreService(name.getMethodName(), initialCorePoolSize, false); + } + + @After + public void tearDown() { + shutdownService(service); + } + /** - * A few ScheduledChore samples that are useful for testing with ChoreService + * Straight forward stopper implementation that is used by default when one is not provided */ - public static class ScheduledChoreSamples { - /** - * Straight forward stopper implementation that is used by default when one is not provided - */ - public static class SampleStopper implements Stoppable { - private boolean stopped = false; - - @Override - public void stop(String why) { - stopped = true; - } + private static class SampleStopper implements Stoppable { + private boolean stopped = false; - @Override - public boolean isStopped() { - return stopped; - } + @Override + public void stop(String why) { + stopped = true; } - /** - * Sleeps for longer than the scheduled period. 
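The new shutdown(boolean) above gives a chore a second cleanup path besides stopping the Stoppable passed to its constructor: it cancels the schedule and then invokes cleanup(), as testCleanupWithShutdown verifies near the end of this test class. A sketch of a subclass relying on that contract (ResourceChore is a hypothetical name, not part of this patch):

    // Hypothetical subclass: cleanup() runs once the chore leaves service, whether
    // via the stopper path in run() or via an explicit shutdown(boolean).
    class ResourceChore extends ScheduledChore {
      ResourceChore(String name, Stoppable stopper, int period) {
        super(name, stopper, period);
      }

      @Override
      protected void chore() {
        // periodic work that holds some resource
      }

      @Override
      protected void cleanup() {
        // release the resource
      }
    }

The HMaster.stopChores hunk later in this patch switches from choreService.cancelChore(...) to a shutdownChore(...) helper; that helper's definition is outside this excerpt, but it is presumably a null-safe wrapper along these lines:

    // Hypothetical helper, assumed rather than shown in this patch:
    private static void shutdownChore(ScheduledChore chore) {
      if (chore != null) {
        chore.shutdown();
      }
    }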
This chore always misses its scheduled periodic - * executions - */ - public static class SlowChore extends ScheduledChore { - public SlowChore(String name, int period) { - this(name, new SampleStopper(), period); - } - - public SlowChore(String name, Stoppable stopper, int period) { - super(name, stopper, period); - } + @Override + public boolean isStopped() { + return stopped; + } + } - @Override - protected boolean initialChore() { - try { - Thread.sleep(getPeriod() * 2); - } catch (InterruptedException e) { - log.warn("", e); - } - return true; - } + /** + * Sleeps for longer than the scheduled period. This chore always misses its scheduled periodic + * executions + */ + private static class SlowChore extends ScheduledChore { + public SlowChore(String name, int period) { + this(name, new SampleStopper(), period); + } - @Override - protected void chore() { - try { - Thread.sleep(getPeriod() * 2); - } catch (InterruptedException e) { - log.warn("", e); - } - } + public SlowChore(String name, Stoppable stopper, int period) { + super(name, stopper, period); } - /** - * Lightweight ScheduledChore used primarily to fill the scheduling queue in tests - */ - public static class DoNothingChore extends ScheduledChore { - public DoNothingChore(String name, int period) { - super(name, new SampleStopper(), period); - } + @Override + protected boolean initialChore() { + Threads.sleep(getPeriod() * 2); + return true; + } - public DoNothingChore(String name, Stoppable stopper, int period) { - super(name, stopper, period); - } + @Override + protected void chore() { + Threads.sleep(getPeriod() * 2); + } + } - @Override - protected void chore() { - // DO NOTHING - } + /** + * Lightweight ScheduledChore used primarily to fill the scheduling queue in tests + */ + private static class DoNothingChore extends ScheduledChore { + public DoNothingChore(String name, int period) { + super(name, new SampleStopper(), period); } - public static class SleepingChore extends ScheduledChore { - private int sleepTime; + public DoNothingChore(String name, Stoppable stopper, int period) { + super(name, stopper, period); + } - public SleepingChore(String name, int chorePeriod, int sleepTime) { - this(name, new SampleStopper(), chorePeriod, sleepTime); - } + @Override + protected void chore() { + // DO NOTHING + } + } - public SleepingChore(String name, Stoppable stopper, int period, int sleepTime) { - super(name, stopper, period); - this.sleepTime = sleepTime; - } + private static class SleepingChore extends ScheduledChore { + private int sleepTime; - @Override - protected boolean initialChore() { - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - log.warn("", e); - } - return true; - } + public SleepingChore(String name, int chorePeriod, int sleepTime) { + this(name, new SampleStopper(), chorePeriod, sleepTime); + } - @Override - protected void chore() { - try { - Thread.sleep(sleepTime); - } catch (Exception e) { - log.warn("", e); - } - } + public SleepingChore(String name, Stoppable stopper, int period, int sleepTime) { + super(name, stopper, period); + this.sleepTime = sleepTime; } - public static class CountingChore extends ScheduledChore { - private int countOfChoreCalls; - private boolean outputOnTicks = false; + @Override + protected boolean initialChore() { + Threads.sleep(sleepTime); + return true; + } - public CountingChore(String name, int period) { - this(name, new SampleStopper(), period); - } + @Override + protected void chore() { + Threads.sleep(sleepTime); + } + } - public 
CountingChore(String name, Stoppable stopper, int period) { - this(name, stopper, period, false); - } + private static class CountingChore extends ScheduledChore { + private int countOfChoreCalls; + private boolean outputOnTicks = false; - public CountingChore(String name, Stoppable stopper, int period, - final boolean outputOnTicks) { - super(name, stopper, period); - this.countOfChoreCalls = 0; - this.outputOnTicks = outputOnTicks; - } + public CountingChore(String name, int period) { + this(name, new SampleStopper(), period); + } - @Override - protected boolean initialChore() { - countOfChoreCalls++; - if (outputOnTicks) { - outputTickCount(); - } - return true; - } + public CountingChore(String name, Stoppable stopper, int period) { + this(name, stopper, period, false); + } - @Override - protected void chore() { - countOfChoreCalls++; - if (outputOnTicks) { - outputTickCount(); - } - } + public CountingChore(String name, Stoppable stopper, int period, final boolean outputOnTicks) { + super(name, stopper, period); + this.countOfChoreCalls = 0; + this.outputOnTicks = outputOnTicks; + } - private void outputTickCount() { - log.info("Chore: " + getName() + ". Count of chore calls: " + countOfChoreCalls); + @Override + protected boolean initialChore() { + countOfChoreCalls++; + if (outputOnTicks) { + outputTickCount(); } + return true; + } - public int getCountOfChoreCalls() { - return countOfChoreCalls; + @Override + protected void chore() { + countOfChoreCalls++; + if (outputOnTicks) { + outputTickCount(); } + } - public boolean isOutputtingOnTicks() { - return outputOnTicks; - } + private void outputTickCount() { + LOG.info("Chore: " + getName() + ". Count of chore calls: " + countOfChoreCalls); + } - public void setOutputOnTicks(boolean o) { - outputOnTicks = o; - } + public int getCountOfChoreCalls() { + return countOfChoreCalls; } + } + + /** + * A Chore that will try to execute the initial chore a few times before succeeding. Once the + * initial chore is complete the chore cancels itself + */ + public static class FailInitialChore extends ScheduledChore { + private int numberOfFailures; + private int failureThreshold; /** - * A Chore that will try to execute the initial chore a few times before succeeding. Once the - * initial chore is complete the chore cancels itself + * @param failThreshold Number of times the Chore fails when trying to execute initialChore + * before succeeding. */ - public static class FailInitialChore extends ScheduledChore { - private int numberOfFailures; - private int failureThreshold; - - /** - * @param failThreshold Number of times the Chore fails when trying to execute initialChore - * before succeeding. 
- */ - public FailInitialChore(String name, int period, int failThreshold) { - this(name, new SampleStopper(), period, failThreshold); - } - - public FailInitialChore(String name, Stoppable stopper, int period, int failThreshold) { - super(name, stopper, period); - numberOfFailures = 0; - failureThreshold = failThreshold; - } + public FailInitialChore(String name, int period, int failThreshold) { + this(name, new SampleStopper(), period, failThreshold); + } - @Override - protected boolean initialChore() { - if (numberOfFailures < failureThreshold) { - numberOfFailures++; - return false; - } else { - return true; - } - } + public FailInitialChore(String name, Stoppable stopper, int period, int failThreshold) { + super(name, stopper, period); + numberOfFailures = 0; + failureThreshold = failThreshold; + } - @Override - protected void chore() { - assertTrue(numberOfFailures == failureThreshold); - cancel(false); + @Override + protected boolean initialChore() { + if (numberOfFailures < failureThreshold) { + numberOfFailures++; + return false; + } else { + return true; } + } + @Override + protected void chore() { + assertTrue(numberOfFailures == failureThreshold); + cancel(false); } } @Test public void testInitialChorePrecedence() throws InterruptedException { - ChoreService service = new ChoreService("testInitialChorePrecedence"); - final int period = 100; final int failureThreshold = 5; - - try { - ScheduledChore chore = new FailInitialChore("chore", period, failureThreshold); - service.scheduleChore(chore); - - int loopCount = 0; - boolean brokeOutOfLoop = false; - - while (!chore.isInitialChoreComplete() && chore.isScheduled()) { - Thread.sleep(failureThreshold * period); - loopCount++; - if (loopCount > 3) { - brokeOutOfLoop = true; - break; - } + ScheduledChore chore = new FailInitialChore("chore", period, failureThreshold); + service.scheduleChore(chore); + + int loopCount = 0; + boolean brokeOutOfLoop = false; + + while (!chore.isInitialChoreComplete() && chore.isScheduled()) { + Thread.sleep(failureThreshold * period); + loopCount++; + if (loopCount > 3) { + brokeOutOfLoop = true; + break; } - - assertFalse(brokeOutOfLoop); - } finally { - shutdownService(service); } + + assertFalse(brokeOutOfLoop); } @Test public void testCancelChore() throws InterruptedException { final int period = 100; - ScheduledChore chore1 = new DoNothingChore("chore1", period); - ChoreService service = new ChoreService("testCancelChore"); - try { - service.scheduleChore(chore1); - assertTrue(chore1.isScheduled()); + ScheduledChore chore = new DoNothingChore("chore", period); + service.scheduleChore(chore); + assertTrue(chore.isScheduled()); - chore1.cancel(true); - assertFalse(chore1.isScheduled()); - assertTrue(service.getNumberOfScheduledChores() == 0); - } finally { - shutdownService(service); - } + chore.cancel(true); + assertFalse(chore.isScheduled()); + assertTrue(service.getNumberOfScheduledChores() == 0); } @Test @@ -304,12 +279,12 @@ public void testScheduledChoreConstruction() { final TimeUnit UNIT = TimeUnit.NANOSECONDS; ScheduledChore chore1 = - new ScheduledChore(NAME, new SampleStopper(), PERIOD, VALID_DELAY, UNIT) { - @Override - protected void chore() { - // DO NOTHING - } - }; + new ScheduledChore(NAME, new SampleStopper(), PERIOD, VALID_DELAY, UNIT) { + @Override + protected void chore() { + // DO NOTHING + } + }; assertEquals("Name construction failed", NAME, chore1.getName()); assertEquals("Period construction failed", PERIOD, chore1.getPeriod()); @@ -317,12 +292,12 @@ protected void chore() 
{ assertEquals("TimeUnit construction failed", UNIT, chore1.getTimeUnit()); ScheduledChore invalidDelayChore = - new ScheduledChore(NAME, new SampleStopper(), PERIOD, INVALID_DELAY, UNIT) { - @Override - protected void chore() { - // DO NOTHING - } - }; + new ScheduledChore(NAME, new SampleStopper(), PERIOD, INVALID_DELAY, UNIT) { + @Override + protected void chore() { + // DO NOTHING + } + }; assertEquals("Initial Delay should be set to 0 when invalid", 0, invalidDelayChore.getInitialDelay()); @@ -334,7 +309,7 @@ public void testChoreServiceConstruction() throws InterruptedException { final int defaultCorePoolSize = ChoreService.MIN_CORE_POOL_SIZE; ChoreService customInit = - new ChoreService("testChoreServiceConstruction_custom", corePoolSize, false); + new ChoreService("testChoreServiceConstruction_custom", corePoolSize, false); try { assertEquals(corePoolSize, customInit.getCorePoolSize()); } finally { @@ -360,258 +335,218 @@ public void testChoreServiceConstruction() throws InterruptedException { public void testFrequencyOfChores() throws InterruptedException { final int period = 100; // Small delta that acts as time buffer (allowing chores to complete if running slowly) - final int delta = period/5; - ChoreService service = new ChoreService("testFrequencyOfChores"); + final int delta = period / 5; CountingChore chore = new CountingChore("countingChore", period); - try { - service.scheduleChore(chore); + service.scheduleChore(chore); - Thread.sleep(10 * period + delta); - assertEquals("10 periods have elapsed.", 11, chore.getCountOfChoreCalls()); + Thread.sleep(10 * period + delta); + assertEquals("10 periods have elapsed.", 11, chore.getCountOfChoreCalls()); - Thread.sleep(10 * period + delta); - assertEquals("20 periods have elapsed.", 21, chore.getCountOfChoreCalls()); - } finally { - shutdownService(service); - } + Thread.sleep(10 * period + delta); + assertEquals("20 periods have elapsed.", 21, chore.getCountOfChoreCalls()); } - public void shutdownService(ChoreService service) throws InterruptedException { + public void shutdownService(ChoreService service) { service.shutdown(); - while (!service.isTerminated()) { - Thread.sleep(100); - } + Waiter.waitFor(CONF, 1000, () -> service.isTerminated()); } @Test public void testForceTrigger() throws InterruptedException { final int period = 100; - final int delta = period/10; - ChoreService service = new ChoreService("testForceTrigger"); + final int delta = period / 10; final CountingChore chore = new CountingChore("countingChore", period); - try { - service.scheduleChore(chore); - Thread.sleep(10 * period + delta); - - assertEquals("10 periods have elapsed.", 11, chore.getCountOfChoreCalls()); - - // Force five runs of the chore to occur, sleeping between triggers to ensure the - // chore has time to run - chore.triggerNow(); - Thread.sleep(delta); - chore.triggerNow(); - Thread.sleep(delta); - chore.triggerNow(); - Thread.sleep(delta); - chore.triggerNow(); - Thread.sleep(delta); - chore.triggerNow(); - Thread.sleep(delta); - - assertEquals("Trigger was called 5 times after 10 periods.", 16, - chore.getCountOfChoreCalls()); - - Thread.sleep(10 * period + delta); - - // Be loosey-goosey. It used to be '26' but it was a big flakey relying on timing. 
- assertTrue("Expected at least 16 invocations, instead got " + chore.getCountOfChoreCalls(), - chore.getCountOfChoreCalls() > 16); - } finally { - shutdownService(service); - } + service.scheduleChore(chore); + Thread.sleep(10 * period + delta); + + assertEquals("10 periods have elapsed.", 11, chore.getCountOfChoreCalls()); + + // Force five runs of the chore to occur, sleeping between triggers to ensure the + // chore has time to run + chore.triggerNow(); + Thread.sleep(delta); + chore.triggerNow(); + Thread.sleep(delta); + chore.triggerNow(); + Thread.sleep(delta); + chore.triggerNow(); + Thread.sleep(delta); + chore.triggerNow(); + Thread.sleep(delta); + + assertEquals("Trigger was called 5 times after 10 periods.", 16, chore.getCountOfChoreCalls()); + + Thread.sleep(10 * period + delta); + + // Be loosey-goosey. It used to be '26' but it was a big flakey relying on timing. + assertTrue("Expected at least 16 invocations, instead got " + chore.getCountOfChoreCalls(), + chore.getCountOfChoreCalls() > 16); } @Test public void testCorePoolIncrease() throws InterruptedException { - final int initialCorePoolSize = 3; - ChoreService service = new ChoreService("testCorePoolIncrease", initialCorePoolSize, false); + assertEquals("Setting core pool size gave unexpected results.", initialCorePoolSize, + service.getCorePoolSize()); - try { - assertEquals("Setting core pool size gave unexpected results.", initialCorePoolSize, - service.getCorePoolSize()); - - final int slowChorePeriod = 100; - SlowChore slowChore1 = new SlowChore("slowChore1", slowChorePeriod); - SlowChore slowChore2 = new SlowChore("slowChore2", slowChorePeriod); - SlowChore slowChore3 = new SlowChore("slowChore3", slowChorePeriod); + final int slowChorePeriod = 100; + SlowChore slowChore1 = new SlowChore("slowChore1", slowChorePeriod); + SlowChore slowChore2 = new SlowChore("slowChore2", slowChorePeriod); + SlowChore slowChore3 = new SlowChore("slowChore3", slowChorePeriod); - service.scheduleChore(slowChore1); - service.scheduleChore(slowChore2); - service.scheduleChore(slowChore3); + service.scheduleChore(slowChore1); + service.scheduleChore(slowChore2); + service.scheduleChore(slowChore3); - Thread.sleep(slowChorePeriod * 10); - assertEquals("Should not create more pools than scheduled chores", 3, - service.getCorePoolSize()); + Thread.sleep(slowChorePeriod * 10); + assertEquals("Should not create more pools than scheduled chores", 3, + service.getCorePoolSize()); - SlowChore slowChore4 = new SlowChore("slowChore4", slowChorePeriod); - service.scheduleChore(slowChore4); + SlowChore slowChore4 = new SlowChore("slowChore4", slowChorePeriod); + service.scheduleChore(slowChore4); - Thread.sleep(slowChorePeriod * 10); - assertEquals("Chores are missing their start time. Should expand core pool size", 4, - service.getCorePoolSize()); + Thread.sleep(slowChorePeriod * 10); + assertEquals("Chores are missing their start time. Should expand core pool size", 4, + service.getCorePoolSize()); - SlowChore slowChore5 = new SlowChore("slowChore5", slowChorePeriod); - service.scheduleChore(slowChore5); + SlowChore slowChore5 = new SlowChore("slowChore5", slowChorePeriod); + service.scheduleChore(slowChore5); - Thread.sleep(slowChorePeriod * 10); - assertEquals("Chores are missing their start time. Should expand core pool size", 5, - service.getCorePoolSize()); - } finally { - shutdownService(service); - } + Thread.sleep(slowChorePeriod * 10); + assertEquals("Chores are missing their start time. 
Should expand core pool size", 5, + service.getCorePoolSize()); } @Test public void testCorePoolDecrease() throws InterruptedException { - final int initialCorePoolSize = 3; - ChoreService service = new ChoreService("testCorePoolDecrease", initialCorePoolSize, false); final int chorePeriod = 100; - try { - // Slow chores always miss their start time and thus the core pool size should be at least as - // large as the number of running slow chores - SlowChore slowChore1 = new SlowChore("slowChore1", chorePeriod); - SlowChore slowChore2 = new SlowChore("slowChore2", chorePeriod); - SlowChore slowChore3 = new SlowChore("slowChore3", chorePeriod); - - service.scheduleChore(slowChore1); - service.scheduleChore(slowChore2); - service.scheduleChore(slowChore3); - - Thread.sleep(chorePeriod * 10); - assertEquals("Should not create more pools than scheduled chores", - service.getNumberOfScheduledChores(), service.getCorePoolSize()); - - SlowChore slowChore4 = new SlowChore("slowChore4", chorePeriod); - service.scheduleChore(slowChore4); - Thread.sleep(chorePeriod * 10); - assertEquals("Chores are missing their start time. Should expand core pool size", - service.getNumberOfScheduledChores(), service.getCorePoolSize()); - - SlowChore slowChore5 = new SlowChore("slowChore5", chorePeriod); - service.scheduleChore(slowChore5); - Thread.sleep(chorePeriod * 10); - assertEquals("Chores are missing their start time. Should expand core pool size", - service.getNumberOfScheduledChores(), service.getCorePoolSize()); - assertEquals(5, service.getNumberOfChoresMissingStartTime()); - - // Now we begin to cancel the chores that caused an increase in the core thread pool of the - // ChoreService. These cancellations should cause a decrease in the core thread pool. - slowChore5.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(4, service.getNumberOfChoresMissingStartTime()); - - slowChore4.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(3, service.getNumberOfChoresMissingStartTime()); - - slowChore3.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(2, service.getNumberOfChoresMissingStartTime()); - - slowChore2.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(1, service.getNumberOfChoresMissingStartTime()); - - slowChore1.cancel(); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(0, service.getNumberOfChoresMissingStartTime()); - } finally { - shutdownService(service); - } + // Slow chores always miss their start time and thus the core pool size should be at least as + // large as the number of running slow chores + SlowChore slowChore1 = new SlowChore("slowChore1", chorePeriod); + SlowChore slowChore2 = new SlowChore("slowChore2", chorePeriod); + SlowChore slowChore3 = new SlowChore("slowChore3", chorePeriod); + + service.scheduleChore(slowChore1); + service.scheduleChore(slowChore2); + service.scheduleChore(slowChore3); + + Thread.sleep(chorePeriod * 10); + 
assertEquals("Should not create more pools than scheduled chores", + service.getNumberOfScheduledChores(), service.getCorePoolSize()); + + SlowChore slowChore4 = new SlowChore("slowChore4", chorePeriod); + service.scheduleChore(slowChore4); + Thread.sleep(chorePeriod * 10); + assertEquals("Chores are missing their start time. Should expand core pool size", + service.getNumberOfScheduledChores(), service.getCorePoolSize()); + + SlowChore slowChore5 = new SlowChore("slowChore5", chorePeriod); + service.scheduleChore(slowChore5); + Thread.sleep(chorePeriod * 10); + assertEquals("Chores are missing their start time. Should expand core pool size", + service.getNumberOfScheduledChores(), service.getCorePoolSize()); + assertEquals(5, service.getNumberOfChoresMissingStartTime()); + + // Now we begin to cancel the chores that caused an increase in the core thread pool of the + // ChoreService. These cancellations should cause a decrease in the core thread pool. + slowChore5.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(4, service.getNumberOfChoresMissingStartTime()); + + slowChore4.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(3, service.getNumberOfChoresMissingStartTime()); + + slowChore3.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(2, service.getNumberOfChoresMissingStartTime()); + + slowChore2.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(1, service.getNumberOfChoresMissingStartTime()); + + slowChore1.cancel(); + Thread.sleep(chorePeriod * 10); + assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), + service.getCorePoolSize()); + assertEquals(0, service.getNumberOfChoresMissingStartTime()); } @Test public void testNumberOfRunningChores() throws InterruptedException { - ChoreService service = new ChoreService("testNumberOfRunningChores"); - final int period = 100; final int sleepTime = 5; - - try { - DoNothingChore dn1 = new DoNothingChore("dn1", period); - DoNothingChore dn2 = new DoNothingChore("dn2", period); - DoNothingChore dn3 = new DoNothingChore("dn3", period); - DoNothingChore dn4 = new DoNothingChore("dn4", period); - DoNothingChore dn5 = new DoNothingChore("dn5", period); - - service.scheduleChore(dn1); - service.scheduleChore(dn2); - service.scheduleChore(dn3); - service.scheduleChore(dn4); - service.scheduleChore(dn5); - - Thread.sleep(sleepTime); - assertEquals("Scheduled chore mismatch", 5, service.getNumberOfScheduledChores()); - - dn1.cancel(); - Thread.sleep(sleepTime); - assertEquals("Scheduled chore mismatch", 4, service.getNumberOfScheduledChores()); - - dn2.cancel(); - dn3.cancel(); - dn4.cancel(); - Thread.sleep(sleepTime); - assertEquals("Scheduled chore mismatch", 1, service.getNumberOfScheduledChores()); - - dn5.cancel(); - Thread.sleep(sleepTime); - assertEquals("Scheduled chore mismatch", 0, service.getNumberOfScheduledChores()); - } finally { - shutdownService(service); - } + DoNothingChore dn1 = new DoNothingChore("dn1", period); + DoNothingChore dn2 = new DoNothingChore("dn2", 
period); + DoNothingChore dn3 = new DoNothingChore("dn3", period); + DoNothingChore dn4 = new DoNothingChore("dn4", period); + DoNothingChore dn5 = new DoNothingChore("dn5", period); + + service.scheduleChore(dn1); + service.scheduleChore(dn2); + service.scheduleChore(dn3); + service.scheduleChore(dn4); + service.scheduleChore(dn5); + + Thread.sleep(sleepTime); + assertEquals("Scheduled chore mismatch", 5, service.getNumberOfScheduledChores()); + + dn1.cancel(); + Thread.sleep(sleepTime); + assertEquals("Scheduled chore mismatch", 4, service.getNumberOfScheduledChores()); + + dn2.cancel(); + dn3.cancel(); + dn4.cancel(); + Thread.sleep(sleepTime); + assertEquals("Scheduled chore mismatch", 1, service.getNumberOfScheduledChores()); + + dn5.cancel(); + Thread.sleep(sleepTime); + assertEquals("Scheduled chore mismatch", 0, service.getNumberOfScheduledChores()); } @Test public void testNumberOfChoresMissingStartTime() throws InterruptedException { - ChoreService service = new ChoreService("testNumberOfChoresMissingStartTime"); - final int period = 100; final int sleepTime = 20 * period; - - try { - // Slow chores sleep for a length of time LONGER than their period. Thus, SlowChores - // ALWAYS miss their start time since their execution takes longer than their period - SlowChore sc1 = new SlowChore("sc1", period); - SlowChore sc2 = new SlowChore("sc2", period); - SlowChore sc3 = new SlowChore("sc3", period); - SlowChore sc4 = new SlowChore("sc4", period); - SlowChore sc5 = new SlowChore("sc5", period); - - service.scheduleChore(sc1); - service.scheduleChore(sc2); - service.scheduleChore(sc3); - service.scheduleChore(sc4); - service.scheduleChore(sc5); - - Thread.sleep(sleepTime); - assertEquals(5, service.getNumberOfChoresMissingStartTime()); - - sc1.cancel(); - Thread.sleep(sleepTime); - assertEquals(4, service.getNumberOfChoresMissingStartTime()); - - sc2.cancel(); - sc3.cancel(); - sc4.cancel(); - Thread.sleep(sleepTime); - assertEquals(1, service.getNumberOfChoresMissingStartTime()); - - sc5.cancel(); - Thread.sleep(sleepTime); - assertEquals(0, service.getNumberOfChoresMissingStartTime()); - } finally { - shutdownService(service); - } + // Slow chores sleep for a length of time LONGER than their period. 
Thus, SlowChores + // ALWAYS miss their start time since their execution takes longer than their period + SlowChore sc1 = new SlowChore("sc1", period); + SlowChore sc2 = new SlowChore("sc2", period); + SlowChore sc3 = new SlowChore("sc3", period); + SlowChore sc4 = new SlowChore("sc4", period); + SlowChore sc5 = new SlowChore("sc5", period); + + service.scheduleChore(sc1); + service.scheduleChore(sc2); + service.scheduleChore(sc3); + service.scheduleChore(sc4); + service.scheduleChore(sc5); + + Thread.sleep(sleepTime); + assertEquals(5, service.getNumberOfChoresMissingStartTime()); + + sc1.cancel(); + Thread.sleep(sleepTime); + assertEquals(4, service.getNumberOfChoresMissingStartTime()); + + sc2.cancel(); + sc3.cancel(); + sc4.cancel(); + Thread.sleep(sleepTime); + assertEquals(1, service.getNumberOfChoresMissingStartTime()); + + sc5.cancel(); + Thread.sleep(sleepTime); + assertEquals(0, service.getNumberOfChoresMissingStartTime()); } /** @@ -621,163 +556,145 @@ public void testNumberOfChoresMissingStartTime() throws InterruptedException { */ @Test public void testMaximumChoreServiceThreads() throws InterruptedException { - ChoreService service = new ChoreService("testMaximumChoreServiceThreads"); final int period = 100; final int sleepTime = 5 * period; - - try { - // Slow chores sleep for a length of time LONGER than their period. Thus, SlowChores - // ALWAYS miss their start time since their execution takes longer than their period. - // Chores that miss their start time will trigger the onChoreMissedStartTime callback - // in the ChoreService. This callback will try to increase the number of core pool - // threads. - SlowChore sc1 = new SlowChore("sc1", period); - SlowChore sc2 = new SlowChore("sc2", period); - SlowChore sc3 = new SlowChore("sc3", period); - SlowChore sc4 = new SlowChore("sc4", period); - SlowChore sc5 = new SlowChore("sc5", period); - - service.scheduleChore(sc1); - service.scheduleChore(sc2); - service.scheduleChore(sc3); - service.scheduleChore(sc4); - service.scheduleChore(sc5); - - Thread.sleep(sleepTime); - assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); - - SlowChore sc6 = new SlowChore("sc6", period); - SlowChore sc7 = new SlowChore("sc7", period); - SlowChore sc8 = new SlowChore("sc8", period); - SlowChore sc9 = new SlowChore("sc9", period); - SlowChore sc10 = new SlowChore("sc10", period); - - service.scheduleChore(sc6); - service.scheduleChore(sc7); - service.scheduleChore(sc8); - service.scheduleChore(sc9); - service.scheduleChore(sc10); - - Thread.sleep(sleepTime); - assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); - } finally { - shutdownService(service); - } + // Slow chores sleep for a length of time LONGER than their period. Thus, SlowChores + // ALWAYS miss their start time since their execution takes longer than their period. + // Chores that miss their start time will trigger the onChoreMissedStartTime callback + // in the ChoreService. This callback will try to increase the number of core pool + // threads. 
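The feedback loop these pool tests exercise: a SlowChore overruns its period, run() reports the miss through onChoreMissedStartTime, the service requests a core-pool increase, and the assertions pin the upper bound to the number of scheduled chores. A compact, illustrative restatement of that invariant using the test class's service field:

    // Illustrative only: schedule more chronically late chores than initial threads
    // and the pool grows, but never beyond the number of scheduled chores.
    for (int i = 1; i <= 5; i++) {
      service.scheduleChore(new SlowChore("slow-" + i, 100));
    }
    Thread.sleep(1000);
    assert service.getCorePoolSize() <= service.getNumberOfScheduledChores();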
+ SlowChore sc1 = new SlowChore("sc1", period); + SlowChore sc2 = new SlowChore("sc2", period); + SlowChore sc3 = new SlowChore("sc3", period); + SlowChore sc4 = new SlowChore("sc4", period); + SlowChore sc5 = new SlowChore("sc5", period); + + service.scheduleChore(sc1); + service.scheduleChore(sc2); + service.scheduleChore(sc3); + service.scheduleChore(sc4); + service.scheduleChore(sc5); + + Thread.sleep(sleepTime); + assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); + + SlowChore sc6 = new SlowChore("sc6", period); + SlowChore sc7 = new SlowChore("sc7", period); + SlowChore sc8 = new SlowChore("sc8", period); + SlowChore sc9 = new SlowChore("sc9", period); + SlowChore sc10 = new SlowChore("sc10", period); + + service.scheduleChore(sc6); + service.scheduleChore(sc7); + service.scheduleChore(sc8); + service.scheduleChore(sc9); + service.scheduleChore(sc10); + + Thread.sleep(sleepTime); + assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); } @Test public void testChangingChoreServices() throws InterruptedException { final int period = 100; final int sleepTime = 10; - ChoreService service1 = new ChoreService("testChangingChoreServices_1"); - ChoreService service2 = new ChoreService("testChangingChoreServices_2"); + ChoreService anotherService = new ChoreService(name.getMethodName() + "_2"); ScheduledChore chore = new DoNothingChore("sample", period); try { assertFalse(chore.isScheduled()); - assertFalse(service1.isChoreScheduled(chore)); - assertFalse(service2.isChoreScheduled(chore)); - assertTrue(chore.getChoreServicer() == null); + assertFalse(service.isChoreScheduled(chore)); + assertFalse(anotherService.isChoreScheduled(chore)); + assertTrue(chore.getChoreService() == null); - service1.scheduleChore(chore); + service.scheduleChore(chore); Thread.sleep(sleepTime); assertTrue(chore.isScheduled()); - assertTrue(service1.isChoreScheduled(chore)); - assertFalse(service2.isChoreScheduled(chore)); - assertFalse(chore.getChoreServicer() == null); + assertTrue(service.isChoreScheduled(chore)); + assertFalse(anotherService.isChoreScheduled(chore)); + assertFalse(chore.getChoreService() == null); - service2.scheduleChore(chore); + anotherService.scheduleChore(chore); Thread.sleep(sleepTime); assertTrue(chore.isScheduled()); - assertFalse(service1.isChoreScheduled(chore)); - assertTrue(service2.isChoreScheduled(chore)); - assertFalse(chore.getChoreServicer() == null); + assertFalse(service.isChoreScheduled(chore)); + assertTrue(anotherService.isChoreScheduled(chore)); + assertFalse(chore.getChoreService() == null); chore.cancel(); assertFalse(chore.isScheduled()); - assertFalse(service1.isChoreScheduled(chore)); - assertFalse(service2.isChoreScheduled(chore)); - assertTrue(chore.getChoreServicer() == null); + assertFalse(service.isChoreScheduled(chore)); + assertFalse(anotherService.isChoreScheduled(chore)); + assertTrue(chore.getChoreService() == null); } finally { - shutdownService(service1); - shutdownService(service2); + shutdownService(anotherService); } } @Test public void testStopperForScheduledChores() throws InterruptedException { - ChoreService service = new ChoreService("testStopperForScheduledChores"); Stoppable stopperForGroup1 = new SampleStopper(); Stoppable stopperForGroup2 = new SampleStopper(); final int period = 100; - final int delta = period/10; - - try { - ScheduledChore chore1_group1 = new DoNothingChore("c1g1", stopperForGroup1, period); - ScheduledChore chore2_group1 = new DoNothingChore("c2g1", stopperForGroup1, 
period); - ScheduledChore chore3_group1 = new DoNothingChore("c3g1", stopperForGroup1, period); - - ScheduledChore chore1_group2 = new DoNothingChore("c1g2", stopperForGroup2, period); - ScheduledChore chore2_group2 = new DoNothingChore("c2g2", stopperForGroup2, period); - ScheduledChore chore3_group2 = new DoNothingChore("c3g2", stopperForGroup2, period); - - service.scheduleChore(chore1_group1); - service.scheduleChore(chore2_group1); - service.scheduleChore(chore3_group1); - service.scheduleChore(chore1_group2); - service.scheduleChore(chore2_group2); - service.scheduleChore(chore3_group2); - - Thread.sleep(delta); - Thread.sleep(10 * period); - assertTrue(chore1_group1.isScheduled()); - assertTrue(chore2_group1.isScheduled()); - assertTrue(chore3_group1.isScheduled()); - assertTrue(chore1_group2.isScheduled()); - assertTrue(chore2_group2.isScheduled()); - assertTrue(chore3_group2.isScheduled()); - - stopperForGroup1.stop("test stopping group 1"); - Thread.sleep(period); - assertFalse(chore1_group1.isScheduled()); - assertFalse(chore2_group1.isScheduled()); - assertFalse(chore3_group1.isScheduled()); - assertTrue(chore1_group2.isScheduled()); - assertTrue(chore2_group2.isScheduled()); - assertTrue(chore3_group2.isScheduled()); - - stopperForGroup2.stop("test stopping group 2"); - Thread.sleep(period); - assertFalse(chore1_group1.isScheduled()); - assertFalse(chore2_group1.isScheduled()); - assertFalse(chore3_group1.isScheduled()); - assertFalse(chore1_group2.isScheduled()); - assertFalse(chore2_group2.isScheduled()); - assertFalse(chore3_group2.isScheduled()); - } finally { - shutdownService(service); - } + final int delta = period / 10; + ScheduledChore chore1_group1 = new DoNothingChore("c1g1", stopperForGroup1, period); + ScheduledChore chore2_group1 = new DoNothingChore("c2g1", stopperForGroup1, period); + ScheduledChore chore3_group1 = new DoNothingChore("c3g1", stopperForGroup1, period); + + ScheduledChore chore1_group2 = new DoNothingChore("c1g2", stopperForGroup2, period); + ScheduledChore chore2_group2 = new DoNothingChore("c2g2", stopperForGroup2, period); + ScheduledChore chore3_group2 = new DoNothingChore("c3g2", stopperForGroup2, period); + + service.scheduleChore(chore1_group1); + service.scheduleChore(chore2_group1); + service.scheduleChore(chore3_group1); + service.scheduleChore(chore1_group2); + service.scheduleChore(chore2_group2); + service.scheduleChore(chore3_group2); + + Thread.sleep(delta); + Thread.sleep(10 * period); + assertTrue(chore1_group1.isScheduled()); + assertTrue(chore2_group1.isScheduled()); + assertTrue(chore3_group1.isScheduled()); + assertTrue(chore1_group2.isScheduled()); + assertTrue(chore2_group2.isScheduled()); + assertTrue(chore3_group2.isScheduled()); + + stopperForGroup1.stop("test stopping group 1"); + Thread.sleep(period); + assertFalse(chore1_group1.isScheduled()); + assertFalse(chore2_group1.isScheduled()); + assertFalse(chore3_group1.isScheduled()); + assertTrue(chore1_group2.isScheduled()); + assertTrue(chore2_group2.isScheduled()); + assertTrue(chore3_group2.isScheduled()); + + stopperForGroup2.stop("test stopping group 2"); + Thread.sleep(period); + assertFalse(chore1_group1.isScheduled()); + assertFalse(chore2_group1.isScheduled()); + assertFalse(chore3_group1.isScheduled()); + assertFalse(chore1_group2.isScheduled()); + assertFalse(chore2_group2.isScheduled()); + assertFalse(chore3_group2.isScheduled()); } @Test public void testShutdownCancelsScheduledChores() throws InterruptedException { final int period = 100; - ChoreService 
service = new ChoreService("testShutdownCancelsScheduledChores"); ScheduledChore successChore1 = new DoNothingChore("sc1", period); ScheduledChore successChore2 = new DoNothingChore("sc2", period); ScheduledChore successChore3 = new DoNothingChore("sc3", period); + assertTrue(service.scheduleChore(successChore1)); + assertTrue(successChore1.isScheduled()); + assertTrue(service.scheduleChore(successChore2)); + assertTrue(successChore2.isScheduled()); + assertTrue(service.scheduleChore(successChore3)); + assertTrue(successChore3.isScheduled()); - try { - assertTrue(service.scheduleChore(successChore1)); - assertTrue(successChore1.isScheduled()); - assertTrue(service.scheduleChore(successChore2)); - assertTrue(successChore2.isScheduled()); - assertTrue(service.scheduleChore(successChore3)); - assertTrue(successChore3.isScheduled()); - } finally { - shutdownService(service); - } + shutdownService(service); assertFalse(successChore1.isScheduled()); assertFalse(successChore2.isScheduled()); @@ -788,34 +705,28 @@ public void testShutdownCancelsScheduledChores() throws InterruptedException { public void testShutdownWorksWhileChoresAreExecuting() throws InterruptedException { final int period = 100; final int sleep = 5 * period; - ChoreService service = new ChoreService("testShutdownWorksWhileChoresAreExecuting"); ScheduledChore slowChore1 = new SleepingChore("sc1", period, sleep); ScheduledChore slowChore2 = new SleepingChore("sc2", period, sleep); ScheduledChore slowChore3 = new SleepingChore("sc3", period, sleep); - try { - assertTrue(service.scheduleChore(slowChore1)); - assertTrue(service.scheduleChore(slowChore2)); - assertTrue(service.scheduleChore(slowChore3)); + assertTrue(service.scheduleChore(slowChore1)); + assertTrue(service.scheduleChore(slowChore2)); + assertTrue(service.scheduleChore(slowChore3)); - Thread.sleep(sleep / 2); - shutdownService(service); + Thread.sleep(sleep / 2); + shutdownService(service); - assertFalse(slowChore1.isScheduled()); - assertFalse(slowChore2.isScheduled()); - assertFalse(slowChore3.isScheduled()); - assertTrue(service.isShutdown()); + assertFalse(slowChore1.isScheduled()); + assertFalse(slowChore2.isScheduled()); + assertFalse(slowChore3.isScheduled()); + assertTrue(service.isShutdown()); - Thread.sleep(5); - assertTrue(service.isTerminated()); - } finally { - shutdownService(service); - } + Thread.sleep(5); + assertTrue(service.isTerminated()); } @Test public void testShutdownRejectsNewSchedules() throws InterruptedException { final int period = 100; - ChoreService service = new ChoreService("testShutdownRejectsNewSchedules"); ScheduledChore successChore1 = new DoNothingChore("sc1", period); ScheduledChore successChore2 = new DoNothingChore("sc2", period); ScheduledChore successChore3 = new DoNothingChore("sc3", period); @@ -823,16 +734,14 @@ public void testShutdownRejectsNewSchedules() throws InterruptedException { ScheduledChore failChore2 = new DoNothingChore("fc2", period); ScheduledChore failChore3 = new DoNothingChore("fc3", period); - try { - assertTrue(service.scheduleChore(successChore1)); - assertTrue(successChore1.isScheduled()); - assertTrue(service.scheduleChore(successChore2)); - assertTrue(successChore2.isScheduled()); - assertTrue(service.scheduleChore(successChore3)); - assertTrue(successChore3.isScheduled()); - } finally { - shutdownService(service); - } + assertTrue(service.scheduleChore(successChore1)); + assertTrue(successChore1.isScheduled()); + assertTrue(service.scheduleChore(successChore2)); + 
assertTrue(successChore2.isScheduled()); + assertTrue(service.scheduleChore(successChore3)); + assertTrue(successChore3.isScheduled()); + + shutdownService(service); assertFalse(service.scheduleChore(failChore1)); assertFalse(failChore1.isScheduled()); @@ -845,17 +754,38 @@ public void testShutdownRejectsNewSchedules() throws InterruptedException { /** * for HBASE-25014 */ - @Test(timeout = 10000) + @Test public void testInitialDelay() { - ChoreService service = new ChoreService(name.getMethodName()); SampleStopper stopper = new SampleStopper(); service.scheduleChore(new ScheduledChore("chore", stopper, 1000, 2000) { - @Override protected void chore() { + @Override + protected void chore() { stopper.stop("test"); } }); - while (!stopper.isStopped()) { - Threads.sleep(1000); - } + Waiter.waitFor(CONF, 5000, () -> stopper.isStopped()); + } + + @Test + public void testCleanupWithStopper() { + SampleStopper stopper = new SampleStopper(); + DoNothingChore chore = spy(new DoNothingChore("chore", stopper, 10)); + service.scheduleChore(chore); + assertTrue(chore.isScheduled()); + verify(chore, never()).cleanup(); + stopper.stop("test"); + Waiter.waitFor(CONF, 200, () -> !chore.isScheduled()); + verify(chore, atLeastOnce()).cleanup(); + } + + @Test + public void testCleanupWithShutdown() { + DoNothingChore chore = spy(new DoNothingChore("chore", 10)); + service.scheduleChore(chore); + assertTrue(chore.isScheduled()); + verify(chore, never()).cleanup(); + chore.shutdown(true); + Waiter.waitFor(CONF, 200, () -> !chore.isScheduled()); + verify(chore, atLeastOnce()).cleanup(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index cbe001e91588..94f3bf2bfda7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -55,7 +55,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; @@ -1500,11 +1499,9 @@ private void switchSnapshotCleanup(final boolean on) { try { snapshotCleanupTracker.setSnapshotCleanupEnabled(on); if (on) { - if (!getChoreService().isChoreScheduled(this.snapshotCleanerChore)) { - getChoreService().scheduleChore(this.snapshotCleanerChore); - } + getChoreService().scheduleChore(this.snapshotCleanerChore); } else { - getChoreService().cancelChore(this.snapshotCleanerChore); + this.snapshotCleanerChore.cancel(); } } catch (KeeperException e) { LOG.error("Error updating snapshot cleanup mode to {}", on, e); @@ -1528,24 +1525,23 @@ private void stopProcedureExecutor() { } private void stopChores() { - ChoreService choreService = getChoreService(); - if (choreService != null) { - choreService.cancelChore(this.mobFileCleanerChore); - choreService.cancelChore(this.mobFileCompactionChore); - choreService.cancelChore(this.balancerChore); + if (getChoreService() != null) { + shutdownChore(mobFileCleanerChore); + shutdownChore(mobFileCompactionChore); + shutdownChore(balancerChore); if (regionNormalizerManager != null) { - choreService.cancelChore(regionNormalizerManager.getRegionNormalizerChore()); - } - choreService.cancelChore(this.clusterStatusChore); - choreService.cancelChore(this.catalogJanitorChore); - 
choreService.cancelChore(this.clusterStatusPublisherChore); - choreService.cancelChore(this.snapshotQuotaChore); - choreService.cancelChore(this.logCleaner); - choreService.cancelChore(this.hfileCleaner); - choreService.cancelChore(this.replicationBarrierCleaner); - choreService.cancelChore(this.snapshotCleanerChore); - choreService.cancelChore(this.hbckChore); - choreService.cancelChore(this.regionsRecoveryChore); + shutdownChore(regionNormalizerManager.getRegionNormalizerChore()); + } + shutdownChore(clusterStatusChore); + shutdownChore(catalogJanitorChore); + shutdownChore(clusterStatusPublisherChore); + shutdownChore(snapshotQuotaChore); + shutdownChore(logCleaner); + shutdownChore(hfileCleaner); + shutdownChore(replicationBarrierCleaner); + shutdownChore(snapshotCleanerChore); + shutdownChore(hbckChore); + shutdownChore(regionsRecoveryChore); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java index a756715062ec..5597cca1152b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HConstants; @@ -70,7 +69,6 @@ public class RegionsRecoveryChore extends ScheduledChore { */ RegionsRecoveryChore(final Stoppable stopper, final Configuration configuration, final HMaster hMaster) { - super(REGIONS_RECOVERY_CHORE_NAME, stopper, configuration.getInt( HConstants.REGIONS_RECOVERY_INTERVAL, HConstants.DEFAULT_REGIONS_RECOVERY_INTERVAL)); this.hMaster = hMaster; @@ -125,7 +123,6 @@ protected void chore() { private Map> getTableToRegionsByRefCount( final Map serverMetricsMap) { - final Map> tableToReopenRegionsMap = new HashMap<>(); for (ServerMetrics serverMetrics : serverMetricsMap.values()) { Map regionMetricsMap = serverMetrics.getRegionMetrics(); @@ -146,13 +143,11 @@ private Map> getTableToRegionsByRefCount( } } return tableToReopenRegionsMap; - } private void prepareTableToReopenRegionsMap( final Map> tableToReopenRegionsMap, final byte[] regionName, final int regionStoreRefCount) { - final RegionInfo regionInfo = hMaster.getAssignmentManager().getRegionInfo(regionName); final TableName tableName = regionInfo.getTable(); if (TableName.isMetaTableName(tableName)) { @@ -165,21 +160,4 @@ private void prepareTableToReopenRegionsMap( tableToReopenRegionsMap .computeIfAbsent(tableName, (key) -> new ArrayList<>()).add(regionName); } - - // hashcode/equals implementation to ensure at-most one object of RegionsRecoveryChore - // is scheduled at a time - RegionsRecoveryConfigManager - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - return o != null && getClass() == o.getClass(); - } - - @Override - public int hashCode() { - return 31; - } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java index b1bfdc0ecb04..78777a18cfd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryConfigManager.java @@ -18,6 +18,7 @@ 
package org.apache.hadoop.hbase.master; +import com.google.errorprone.annotations.RestrictedApi; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HConstants; @@ -27,8 +28,7 @@ import org.slf4j.LoggerFactory; /** - * Config manager for RegionsRecovery Chore - Dynamically reload config and update chore - * accordingly + * Config manager for RegionsRecovery Chore - Dynamically reload config and update chore accordingly */ @InterfaceAudience.Private public class RegionsRecoveryConfigManager implements ConfigurationObserver { @@ -36,6 +36,7 @@ public class RegionsRecoveryConfigManager implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(RegionsRecoveryConfigManager.class); private final HMaster hMaster; + private RegionsRecoveryChore chore; private int prevMaxStoreFileRefCount; private int prevRegionsRecoveryInterval; @@ -51,34 +52,35 @@ public void onConfigurationChange(Configuration conf) { final int newMaxStoreFileRefCount = getMaxStoreFileRefCount(conf); final int newRegionsRecoveryInterval = getRegionsRecoveryChoreInterval(conf); - if (prevMaxStoreFileRefCount == newMaxStoreFileRefCount - && prevRegionsRecoveryInterval == newRegionsRecoveryInterval) { + if (prevMaxStoreFileRefCount == newMaxStoreFileRefCount && + prevRegionsRecoveryInterval == newRegionsRecoveryInterval) { // no need to re-schedule the chore with updated config // as there is no change in desired configs return; } - LOG.info("Config Reload for RegionsRecovery Chore. prevMaxStoreFileRefCount: {}," + + LOG.info( + "Config Reload for RegionsRecovery Chore. prevMaxStoreFileRefCount: {}," + " newMaxStoreFileRefCount: {}, prevRegionsRecoveryInterval: {}, " + - "newRegionsRecoveryInterval: {}", prevMaxStoreFileRefCount, newMaxStoreFileRefCount, - prevRegionsRecoveryInterval, newRegionsRecoveryInterval); + "newRegionsRecoveryInterval: {}", + prevMaxStoreFileRefCount, newMaxStoreFileRefCount, prevRegionsRecoveryInterval, + newRegionsRecoveryInterval); - RegionsRecoveryChore regionsRecoveryChore = new RegionsRecoveryChore(this.hMaster, - conf, this.hMaster); + RegionsRecoveryChore regionsRecoveryChore = + new RegionsRecoveryChore(this.hMaster, conf, this.hMaster); ChoreService choreService = this.hMaster.getChoreService(); // Regions Reopen based on very high storeFileRefCount is considered enabled // only if hbase.regions.recovery.store.file.ref.count has value > 0 - synchronized (this) { + if (chore != null) { + chore.shutdown(); + chore = null; + } if (newMaxStoreFileRefCount > 0) { - // reschedule the chore - // provide mayInterruptIfRunning - false to take care of completion - // of in progress task if any - choreService.cancelChore(regionsRecoveryChore, false); + // schedule the new chore choreService.scheduleChore(regionsRecoveryChore); - } else { - choreService.cancelChore(regionsRecoveryChore, false); + chore = regionsRecoveryChore; } this.prevMaxStoreFileRefCount = newMaxStoreFileRefCount; this.prevRegionsRecoveryInterval = newRegionsRecoveryInterval; @@ -86,15 +88,18 @@ public void onConfigurationChange(Configuration conf) { } private int getMaxStoreFileRefCount(Configuration configuration) { - return configuration.getInt( - HConstants.STORE_FILE_REF_COUNT_THRESHOLD, + return configuration.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD, HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD); } private int getRegionsRecoveryChoreInterval(Configuration configuration) { - return configuration.getInt( - 
HConstants.REGIONS_RECOVERY_INTERVAL, + return configuration.getInt(HConstants.REGIONS_RECOVERY_INTERVAL, HConstants.DEFAULT_REGIONS_RECOVERY_INTERVAL); } + @RestrictedApi(explanation = "Only visible for testing", link = "", + allowedOnPath = ".*/src/test/.*") + RegionsRecoveryChore getChore() { + return chore; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 8977174edba7..f91f04000cd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -936,7 +936,7 @@ public void startChore() { */ public void stop() { if (flushedSeqIdFlusher != null) { - flushedSeqIdFlusher.cancel(); + flushedSeqIdFlusher.shutdown(); } if (persistFlushedSequenceId) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index f628841cb4fc..186a8ff11bba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -456,7 +456,7 @@ public void stop() { choreService.shutdown(); } if (timeoutMonitor != null) { - timeoutMonitor.cancel(true); + timeoutMonitor.shutdown(true); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index bd1bff157cd4..f2d88bac527a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -102,7 +102,7 @@ public void start() throws IOException { public void stop(final String why) { if (refreshChore != null) { LOG.debug("Stopping QuotaRefresherChore chore."); - refreshChore.cancel(true); + refreshChore.shutdown(true); } stopped = true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java index 81e7e87603c0..282075b6d71b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java @@ -98,11 +98,11 @@ public synchronized void start() throws IOException { public synchronized void stop() { if (spaceQuotaRefresher != null) { - spaceQuotaRefresher.cancel(); + spaceQuotaRefresher.shutdown(); spaceQuotaRefresher = null; } if (regionSizeReporter != null) { - regionSizeReporter.cancel(); + regionSizeReporter.shutdown(); regionSizeReporter = null; } started = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index bcb143652203..e40e25158269 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -2642,6 +2642,11 @@ private void scheduleAbortTimer() { } } + protected final void shutdownChore(ScheduledChore chore) { + if (chore != null) { + chore.shutdown(); + } + } /** * Wait on all threads to finish. 
Presumption is that all closes and stops
 * have already been called.
@@ -2649,15 +2654,16 @@ private void scheduleAbortTimer() {
 protected void stopServiceThreads() {
   // clean up the scheduled chores
   if (this.choreService != null) {
-    choreService.cancelChore(nonceManagerChore);
-    choreService.cancelChore(compactionChecker);
-    choreService.cancelChore(periodicFlusher);
-    choreService.cancelChore(healthCheckChore);
-    choreService.cancelChore(executorStatusChore);
-    choreService.cancelChore(storefileRefresher);
-    choreService.cancelChore(fsUtilizationChore);
-    choreService.cancelChore(slowLogTableOpsChore);
-    // clean up the remaining scheduled chores (in case we missed out any)
+    shutdownChore(nonceManagerChore);
+    shutdownChore(compactionChecker);
+    shutdownChore(periodicFlusher);
+    shutdownChore(healthCheckChore);
+    shutdownChore(executorStatusChore);
+    shutdownChore(storefileRefresher);
+    shutdownChore(fsUtilizationChore);
+    shutdownChore(slowLogTableOpsChore);
+    // cancel the remaining scheduled chores (in case we missed out any)
+    // TODO: cancel will not clean up the chores, so we need to make sure we do not miss any
     choreService.shutdown();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
index 1f831eefee62..342ec18e1ed9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
@@ -216,7 +216,7 @@ public void start(ChoreService service) {
 public void stop() {
   // The thread is Daemon. Just interrupting the ongoing process.
   LOG.info("Stopping");
-  this.heapMemTunerChore.cancel(true);
+  this.heapMemTunerChore.shutdown(true);
 }
 public void registerTuneObserver(HeapMemoryTuneObserver observer) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
index d29e061d07fd..6819e5d2b110 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
@@ -18,18 +18,18 @@
 package org.apache.hadoop.hbase.master;
-import java.io.IOException;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
-import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -38,7 +38,7 @@
 /**
  * Test for Regions Recovery Config Manager
  */
-@Category({MasterTests.class, MediumTests.class})
+@Category({ MasterTests.class, MediumTests.class })
 public class TestRegionsRecoveryConfigManager {
 @ClassRule
@@ -51,8 +51,6 @@ public class TestRegionsRecoveryConfigManager {
 private HMaster hMaster;
-  private RegionsRecoveryChore regionsRecoveryChore;
-
 private RegionsRecoveryConfigManager regionsRecoveryConfigManager;
 private
Configuration conf; @@ -62,10 +60,8 @@ public void setup() throws Exception { conf = HBASE_TESTING_UTILITY.getConfiguration(); conf.unset("hbase.regions.recovery.store.file.ref.count"); conf.unset("hbase.master.regions.recovery.check.interval"); - StartMiniClusterOption option = StartMiniClusterOption.builder() - .masterClass(TestHMaster.class) - .numRegionServers(1) - .numDataNodes(1).build(); + StartMiniClusterOption option = StartMiniClusterOption.builder().masterClass(TestHMaster.class) + .numRegionServers(1).numDataNodes(1).build(); HBASE_TESTING_UTILITY.startMiniCluster(option); cluster = HBASE_TESTING_UTILITY.getMiniHBaseCluster(); } @@ -77,44 +73,44 @@ public void tearDown() throws Exception { @Test public void testChoreSchedule() throws Exception { - this.hMaster = cluster.getMaster(); - Stoppable stoppable = new StoppableImplementation(); - this.regionsRecoveryChore = new RegionsRecoveryChore(stoppable, conf, hMaster); - this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this.hMaster); // not yet scheduled - Assert.assertFalse(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertFalse( + hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // not yet scheduled - Assert.assertFalse(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertFalse( + hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.setInt("hbase.master.regions.recovery.check.interval", 10); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // not yet scheduled - missing config: hbase.regions.recovery.store.file.ref.count - Assert.assertFalse(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertFalse( + hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.setInt("hbase.regions.recovery.store.file.ref.count", 10); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // chore scheduled - Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.setInt("hbase.regions.recovery.store.file.ref.count", 20); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // chore re-scheduled - Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.setInt("hbase.regions.recovery.store.file.ref.count", 20); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // chore scheduling untouched - Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertTrue(hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); conf.unset("hbase.regions.recovery.store.file.ref.count"); this.regionsRecoveryConfigManager.onConfigurationChange(conf); // chore un-scheduled - Assert.assertFalse(hMaster.getChoreService().isChoreScheduled(regionsRecoveryChore)); + assertFalse( + hMaster.getChoreService().isChoreScheduled(regionsRecoveryConfigManager.getChore())); } // Make it public so that JVMClusterUtil can access it. @@ -123,24 +119,4 @@ public TestHMaster(Configuration conf) throws IOException { super(conf); } } - - /** - * Simple helper class that just keeps track of whether or not its stopped. 
- */
-  private static class StoppableImplementation implements Stoppable {
-
-    private boolean stop = false;
-
-    @Override
-    public void stop(String why) {
-      this.stop = true;
-    }
-
-    @Override
-    public boolean isStopped() {
-      return this.stop;
-    }
-
-  }
-
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
index 252276819cbe..b1fdf2676575 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
@@ -111,7 +111,7 @@ public void setup() throws IOException, KeeperException {
 @After
 public void teardown() {
-  this.janitor.cancel(true);
+  this.janitor.shutdown(true);
   this.masterServices.stop("DONE");
 }

From 023d2934bada8710ac79ccdc7a96e284e1594244 Mon Sep 17 00:00:00 2001
From: huaxiangsun
Date: Wed, 20 Jan 2021 09:04:50 -0800
Subject: [PATCH 656/769] HBASE-25368 Filter out more invalid encoded name in
 isEncodedRegionNa… (#2868)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

HBASE-25368 Filter out more invalid encoded name in isEncodedRegionName(byte[] regionName)

Signed-off-by: Duo Zhang
---
 .../hbase/client/RawAsyncHBaseAdmin.java | 87 ++++++++++---------
 .../hadoop/hbase/client/RegionInfo.java | 18 +++-
 .../hadoop/hbase/client/TestAdmin1.java | 19 ++++
 .../hadoop/hbase/client/TestAdmin2.java | 8 +-
 4 files changed, 85 insertions(+), 47 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 512e7a96aa6d..38bdddef1e5e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -2388,51 +2388,56 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR
 if (regionNameOrEncodedRegionName == null) {
   return failedFuture(new IllegalArgumentException("Passed region name can't be null"));
 }
-    try {
-      CompletableFuture> future;
-      if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) {
-        String encodedName = Bytes.toString(regionNameOrEncodedRegionName);
-        if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) {
-          // old format encodedName, should be meta region
-          future = connection.registry.getMetaRegionLocations()
-            .thenApply(locs -> Stream.of(locs.getRegionLocations())
-              .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst());
-        } else {
-          future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable,
-            regionNameOrEncodedRegionName);
-        }
+
+    CompletableFuture> future;
+    if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) {
+      String encodedName = Bytes.toString(regionNameOrEncodedRegionName);
+      if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) {
+        // old format encodedName, should be meta region
+        future = connection.registry.getMetaRegionLocations()
+          .thenApply(locs -> Stream.of(locs.getRegionLocations())
+            .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst());
      } else {
-        RegionInfo regionInfo =
-          CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName);
-        if (regionInfo.isMetaRegion()) {
-          future =
connection.registry.getMetaRegionLocations()
-            .thenApply(locs -> Stream.of(locs.getRegionLocations())
-              .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId())
-              .findFirst());
-        } else {
-          future =
-            ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName);
-        }
+        future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable,
+          regionNameOrEncodedRegionName);
+      }
+    } else {
+      // Not all regionNameOrEncodedRegionName here is going to be a valid region name,
+      // it needs to throw an IllegalArgumentException in case tableName is passed in.
+      RegionInfo regionInfo;
+      try {
+        regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName(
+          regionNameOrEncodedRegionName);
+      } catch (IOException ioe) {
+        return failedFuture(new IllegalArgumentException(ioe.getMessage()));
      }
-      CompletableFuture returnedFuture = new CompletableFuture<>();
-      addListener(future, (location, err) -> {
-        if (err != null) {
-          returnedFuture.completeExceptionally(err);
-          return;
-        }
-        if (!location.isPresent() || location.get().getRegion() == null) {
-          returnedFuture.completeExceptionally(
-            new UnknownRegionException("Invalid region name or encoded region name: " +
-              Bytes.toStringBinary(regionNameOrEncodedRegionName)));
-        } else {
-          returnedFuture.complete(location.get());
-        }
-      });
-      return returnedFuture;
-    } catch (IOException e) {
-      return failedFuture(e);
+      if (regionInfo.isMetaRegion()) {
+        future = connection.registry.getMetaRegionLocations()
+          .thenApply(locs -> Stream.of(locs.getRegionLocations())
+            .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId())
+            .findFirst());
+      } else {
+        future =
+          ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName);
+      }
    }
+
+    CompletableFuture returnedFuture = new CompletableFuture<>();
+    addListener(future, (location, err) -> {
+      if (err != null) {
+        returnedFuture.completeExceptionally(err);
+        return;
+      }
+      if (!location.isPresent() || location.get().getRegion() == null) {
+        returnedFuture.completeExceptionally(
+          new UnknownRegionException("Invalid region name or encoded region name: " +
+            Bytes.toStringBinary(regionNameOrEncodedRegionName)));
+      } else {
+        returnedFuture.complete(location.get());
+      }
+    });
+    return returnedFuture;
 }

 /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index d7460e9d15ef..b6bdd0103de8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -363,7 +363,23 @@ static byte[] getStartKey(final byte[] regionName) throws IOException {
 @InterfaceAudience.Private // For use by internals only.
 public static boolean isEncodedRegionName(byte[] regionName) {
   // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex.
-  return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH;
+  if (parseRegionNameOrReturnNull(regionName) == null) {
+    if (regionName.length > MD5_HEX_LENGTH) {
+      return false;
+    } else if (regionName.length == MD5_HEX_LENGTH) {
+      return true;
+    } else {
+      String encodedName = Bytes.toString(regionName);
+      try {
+        Integer.parseInt(encodedName);
+        // If this is a valid integer, it could be hbase:meta's encoded region name.
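        // (Illustrative examples, editorial rather than part of the patch; they assume
        // MD5_HEX_LENGTH is 32, the length of an MD5 hex string:
        //   isEncodedRegionName(Bytes.toBytes("dd9b78aa0e8d2a62ebb5e9a377c3e9f3"))
        //     -> true: 32 chars and not parseable as a full region name
        //   isEncodedRegionName(Bytes.toBytes("1588230740"))
        //     -> true: short but numeric, like hbase:meta's legacy encoded name
        //   isEncodedRegionName(Bytes.toBytes("someTableName"))
        //     -> false: short and non-numeric; the old one-line check said true here
        // That last case is the HBASE-25368 fix: a bare table name no longer looks encoded.)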
+ return true; + } catch(NumberFormatException er) { + return false; + } + } + } + return false; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index a0ed836f9c75..b48841660166 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -99,6 +100,24 @@ public void testSplitFlushCompactUnknownTable() throws InterruptedException { assertTrue(exception instanceof TableNotFoundException); } + @Test + public void testCompactATableWithSuperLongTableName() throws Exception { + TableName tableName = TableName.valueOf(name.getMethodName()); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + try { + ADMIN.createTable(htd); + assertThrows(IllegalArgumentException.class, + () -> ADMIN.majorCompactRegion(tableName.getName())); + + assertThrows(IllegalArgumentException.class, + () -> ADMIN.majorCompactRegion(Bytes.toBytes("abcd"))); + } finally { + ADMIN.disableTable(tableName); + ADMIN.deleteTable(tableName); + } + } + @Test public void testCompactionTimestamps() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 914152b58dec..b0271a006aca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -298,11 +298,9 @@ public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception { if (!regionInfo.isMetaRegion()) { if (regionInfo.getRegionNameAsString().contains(name)) { info = regionInfo; - try { - ADMIN.unassign(Bytes.toBytes("sample"), true); - } catch (UnknownRegionException nsre) { - // expected, ignore it - } + assertThrows(UnknownRegionException.class, + () -> ADMIN.unassign(Bytes.toBytes( + "test,,1358563771069.acc1ad1b7962564fc3a43e5907e8db33."), true)); } } } From e871ed2c65d9cfdd689a56de1a83a7e3bee27611 Mon Sep 17 00:00:00 2001 From: huaxiangsun Date: Fri, 22 Jan 2021 19:25:06 -0800 Subject: [PATCH 657/769] HBASE-25416 Add 2.3.4 to the downloads page (#2902) Signed-off-by: Nick Dimiduk --- src/site/xdoc/downloads.xml | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index fe6f3d8d198e..a4123ba5d559 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -68,26 +68,26 @@ under the License. - 2.3.3 + 2.3.4 - 2020/11/02 + 2021/01/22 - 2.3.2 vs 2.3.3 + 2.3.3 vs 2.3.4 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc)
    + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc)
    -
    +
    stable release
@@ -110,7 +110,6 @@
    bin (sha512 asc)
    client-bin (sha512 asc) - stable release From ae3bab8e4d30c027cd74273be75b48b813a0c57d Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Sun, 24 Jan 2021 23:37:03 +0800 Subject: [PATCH 658/769] HBASE-25522 Remove deprecated methods in ReplicationPeerConfig (#2898) Signed-off-by: Viraj Jasani --- .../replication/ReplicationPeerConfig.java | 97 ----------------- .../client/TestAsyncReplicationAdminApi.java | 102 ++++++++---------- ...tAsyncReplicationAdminApiWithClusters.java | 27 ++--- .../hbase/client/TestReplicaWithCluster.java | 4 +- .../replication/TestMasterReplication.java | 10 +- .../TestMultiSlaveReplication.java | 11 +- .../replication/TestReplicationEndpoint.java | 60 ++++++----- .../replication/TestReplicationWithTags.java | 4 +- .../TestGlobalReplicationThrottler.java | 4 +- .../regionserver/TestReplicator.java | 9 +- .../security/access/TestAccessController.java | 3 +- ...bilityLabelReplicationWithExpAsString.java | 5 +- .../TestVisibilityLabelsReplication.java | 4 +- 13 files changed, 125 insertions(+), 215 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index bb3ff042ca06..5ca5cef9c4ed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -79,41 +79,6 @@ private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) { return Collections.unmodifiableMap(newTableCFsMap); } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder} to create new ReplicationPeerConfig. - */ - @Deprecated - public ReplicationPeerConfig() { - this.peerData = new TreeMap<>(Bytes.BYTES_COMPARATOR); - this.configuration = new HashMap<>(0); - this.serial = false; - } - - /** - * Set the clusterKey which is the concatenation of the slave cluster's: - * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setClusterKey(String)} instead. - */ - @Deprecated - public ReplicationPeerConfig setClusterKey(String clusterKey) { - this.clusterKey = clusterKey; - return this; - } - - /** - * Sets the ReplicationEndpoint plugin class for this peer. - * @param replicationEndpointImpl a class implementing ReplicationEndpoint - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setReplicationEndpointImpl(String)} instead. - */ - @Deprecated - public ReplicationPeerConfig setReplicationEndpointImpl(String replicationEndpointImpl) { - this.replicationEndpointImpl = replicationEndpointImpl; - return this; - } - public String getClusterKey() { return clusterKey; } @@ -134,88 +99,26 @@ public Map> getTableCFsMap() { return (Map>) tableCFsMap; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setTableCFsMap(Map)} instead. - */ - @Deprecated - public ReplicationPeerConfig setTableCFsMap(Map> tableCFsMap) { - this.tableCFsMap = tableCFsMap; - return this; - } - public Set getNamespaces() { return this.namespaces; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setNamespaces(Set)} instead. 
- */ - @Deprecated - public ReplicationPeerConfig setNamespaces(Set namespaces) { - this.namespaces = namespaces; - return this; - } - public long getBandwidth() { return this.bandwidth; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setBandwidth(long)} instead. - */ - @Deprecated - public ReplicationPeerConfig setBandwidth(long bandwidth) { - this.bandwidth = bandwidth; - return this; - } - public boolean replicateAllUserTables() { return this.replicateAllUserTables; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setReplicateAllUserTables(boolean)} instead. - */ - @Deprecated - public ReplicationPeerConfig setReplicateAllUserTables(boolean replicateAllUserTables) { - this.replicateAllUserTables = replicateAllUserTables; - return this; - } - public Map> getExcludeTableCFsMap() { return (Map>) excludeTableCFsMap; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setExcludeTableCFsMap(Map)} instead. - */ - @Deprecated - public ReplicationPeerConfig setExcludeTableCFsMap(Map> tableCFsMap) { - this.excludeTableCFsMap = tableCFsMap; - return this; - } - public Set getExcludeNamespaces() { return this.excludeNamespaces; } - /** - * @deprecated as release of 2.0.0, and it will be removed in 3.0.0. Use - * {@link ReplicationPeerConfigBuilder#setExcludeNamespaces(Set)} instead. - */ - @Deprecated - public ReplicationPeerConfig setExcludeNamespaces(Set namespaces) { - this.excludeNamespaces = namespaces; - return this; - } - public String getRemoteWALDir() { return this.remoteWALDir; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java index 74b5c2fbd3c8..479fe6b35a43 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; @@ -108,10 +109,8 @@ public void clearPeerAndQueues() throws IOException, ReplicationException { @Test public void testAddRemovePeer() throws Exception { - ReplicationPeerConfig rpc1 = new ReplicationPeerConfig(); - rpc1.setClusterKey(KEY_ONE); - ReplicationPeerConfig rpc2 = new ReplicationPeerConfig(); - rpc2.setClusterKey(KEY_TWO); + ReplicationPeerConfig rpc1 = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(); + ReplicationPeerConfig rpc2 = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_TWO).build(); // Add a valid peer admin.addReplicationPeer(ID_ONE, rpc1).join(); // try adding the same (fails) @@ -142,10 +141,11 @@ public void testAddRemovePeer() throws Exception { @Test public void testPeerConfig() throws Exception { - ReplicationPeerConfig config = new ReplicationPeerConfig(); - config.setClusterKey(KEY_ONE); - config.getConfiguration().put("key1", "value1"); - 
config.getConfiguration().put("key2", "value2"); + ReplicationPeerConfig config = ReplicationPeerConfig.newBuilder() + .setClusterKey(KEY_ONE) + .putConfiguration("key1", "value1") + .putConfiguration("key2", "value2") + .build(); admin.addReplicationPeer(ID_ONE, config).join(); List peers = admin.listReplicationPeers().get(); @@ -160,8 +160,7 @@ public void testPeerConfig() throws Exception { @Test public void testEnableDisablePeer() throws Exception { - ReplicationPeerConfig rpc1 = new ReplicationPeerConfig(); - rpc1.setClusterKey(KEY_ONE); + ReplicationPeerConfig rpc1 = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(); admin.addReplicationPeer(ID_ONE, rpc1).join(); List peers = admin.listReplicationPeers().get(); assertEquals(1, peers.size()); @@ -176,8 +175,8 @@ public void testEnableDisablePeer() throws Exception { @Test public void testAppendPeerTableCFs() throws Exception { - ReplicationPeerConfig rpc1 = new ReplicationPeerConfig(); - rpc1.setClusterKey(KEY_ONE); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); final TableName tableName1 = TableName.valueOf(tableName.getNameAsString() + "t1"); final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "t2"); final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "t3"); @@ -186,9 +185,9 @@ public void testAppendPeerTableCFs() throws Exception { final TableName tableName6 = TableName.valueOf(tableName.getNameAsString() + "t6"); // Add a valid peer - admin.addReplicationPeer(ID_ONE, rpc1).join(); - rpc1.setReplicateAllUserTables(false); - admin.updateReplicationPeerConfig(ID_ONE, rpc1).join(); + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + rpcBuilder.setReplicateAllUserTables(false); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); Map> tableCFs = new HashMap<>(); @@ -280,16 +279,16 @@ public void testAppendPeerTableCFs() throws Exception { @Test public void testRemovePeerTableCFs() throws Exception { - ReplicationPeerConfig rpc1 = new ReplicationPeerConfig(); - rpc1.setClusterKey(KEY_ONE); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); final TableName tableName1 = TableName.valueOf(tableName.getNameAsString() + "t1"); final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "t2"); final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "t3"); final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "t4"); // Add a valid peer - admin.addReplicationPeer(ID_ONE, rpc1).join(); - rpc1.setReplicateAllUserTables(false); - admin.updateReplicationPeerConfig(ID_ONE, rpc1).join(); + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + rpcBuilder.setReplicateAllUserTables(false); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); Map> tableCFs = new HashMap<>(); try { @@ -369,30 +368,28 @@ public void testSetPeerNamespaces() throws Exception { String ns1 = "ns1"; String ns2 = "ns2"; - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(KEY_ONE); - admin.addReplicationPeer(ID_ONE, rpc).join(); - rpc.setReplicateAllUserTables(false); - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + rpcBuilder.setReplicateAllUserTables(false); + 
admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); // add ns1 and ns2 to peer config - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); Set namespaces = new HashSet<>(); namespaces.add(ns1); namespaces.add(ns2); - rpc.setNamespaces(namespaces); - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + rpcBuilder.setNamespaces(namespaces); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); namespaces = admin.getReplicationPeerConfig(ID_ONE).get().getNamespaces(); assertEquals(2, namespaces.size()); assertTrue(namespaces.contains(ns1)); assertTrue(namespaces.contains(ns2)); // update peer config only contains ns1 - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); namespaces = new HashSet<>(); namespaces.add(ns1); - rpc.setNamespaces(namespaces); - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + rpcBuilder.setNamespaces(namespaces); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); namespaces = admin.getReplicationPeerConfig(ID_ONE).get().getNamespaces(); assertEquals(1, namespaces.size()); assertTrue(namespaces.contains(ns1)); @@ -407,40 +404,36 @@ public void testNamespacesAndTableCfsConfigConflict() throws Exception { final TableName tableName1 = TableName.valueOf(ns1 + ":" + tableName.getNameAsString() + "1"); final TableName tableName2 = TableName.valueOf(ns2 + ":" + tableName.getNameAsString() + "2"); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(KEY_ONE); - admin.addReplicationPeer(ID_ONE, rpc).join(); - rpc.setReplicateAllUserTables(false); - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + rpcBuilder.setReplicateAllUserTables(false); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); Set namespaces = new HashSet(); namespaces.add(ns1); - rpc.setNamespaces(namespaces); - admin.updateReplicationPeerConfig(ID_ONE, rpc).get(); - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); + rpcBuilder.setNamespaces(namespaces); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).get(); Map> tableCfs = new HashMap<>(); tableCfs.put(tableName1, new ArrayList<>()); - rpc.setTableCFsMap(tableCfs); + rpcBuilder.setTableCFsMap(tableCfs); try { - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); fail( "Test case should fail, because table " + tableName1 + " conflict with namespace " + ns1); } catch (CompletionException e) { // OK } - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); tableCfs.clear(); tableCfs.put(tableName2, new ArrayList<>()); - rpc.setTableCFsMap(tableCfs); - admin.updateReplicationPeerConfig(ID_ONE, rpc).get(); - rpc = admin.getReplicationPeerConfig(ID_ONE).get(); + rpcBuilder.setTableCFsMap(tableCfs); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).get(); namespaces.clear(); namespaces.add(ns2); - rpc.setNamespaces(namespaces); + rpcBuilder.setNamespaces(namespaces); try { - admin.updateReplicationPeerConfig(ID_ONE, rpc).join(); + admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); fail( "Test case should fail, because namespace " + ns2 + " conflict with table " + tableName2); } catch (CompletionException e) { @@ -452,15 +445,14 @@ public void testNamespacesAndTableCfsConfigConflict() throws Exception { 
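// (Editorial sketch of the construction pattern these tests migrate to, now that the
// deprecated mutable setters are gone; KEY_ONE is this test class's cluster-key
// constant:
//
//   ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
//       .setClusterKey(KEY_ONE)
//       .setReplicateAllUserTables(false)
//       .setBandwidth(2097152)
//       .build();
//
// ReplicationPeerConfig itself stays immutable; every change goes through a fresh
// builder, optionally seeded from an existing config via newBuilder(config), as the
// TestAsyncReplicationAdminApiWithClusters hunk further down shows.)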
@Test
public void testPeerBandwidth() throws Exception {
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(KEY_ONE);
+    ReplicationPeerConfigBuilder rpcBuilder =
+      ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE);
-    admin.addReplicationPeer(ID_ONE, rpc).join();
-    rpc = admin.getReplicationPeerConfig(ID_ONE).get();
-    assertEquals(0, rpc.getBandwidth());
+    admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join();
+    assertEquals(0, admin.getReplicationPeerConfig(ID_ONE).get().getBandwidth());
-    rpc.setBandwidth(2097152);
-    admin.updateReplicationPeerConfig(ID_ONE, rpc).join();
+    rpcBuilder.setBandwidth(2097152);
+    admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join();
   assertEquals(2097152, admin.getReplicationPeerConfig(ID_ONE).join().getBandwidth());
   admin.removeReplicationPeer(ID_ONE).join();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java
index 1fb9df66abb0..c9599630ba76 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java
@@ -24,8 +24,8 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
-import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.ForkJoinPool;
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -86,8 +87,8 @@ public static void setUpBeforeClass() throws Exception {
     ConnectionFactory.createAsyncConnection(TEST_UTIL2.getConfiguration()).get();
   admin2 = connection.getAdmin();
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(TEST_UTIL2.getClusterKey());
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(TEST_UTIL2.getClusterKey()).build();
   ASYNC_CONN.getAdmin().addReplicationPeer(ID_SECOND, rpc).join();
 }
@@ -231,30 +232,30 @@ public void testEnableReplicationForExplicitSetTableCfs() throws Exception {
   assertFalse("Table should not exists in the peer cluster",
     admin2.tableExists(tableName2).get());
-    Map> tableCfs = new HashMap<>();
+    Map> tableCfs = new HashMap<>();
   tableCfs.put(tableName, null);
-    ReplicationPeerConfig rpc = admin.getReplicationPeerConfig(ID_SECOND).get();
-    rpc.setReplicateAllUserTables(false);
-    rpc.setTableCFsMap(tableCfs);
+    ReplicationPeerConfigBuilder rpcBuilder = ReplicationPeerConfig
+      .newBuilder(admin.getReplicationPeerConfig(ID_SECOND).get())
+      .setReplicateAllUserTables(false)
+      .setTableCFsMap(tableCfs);
   try {
     // Only add tableName to replication peer config
-      admin.updateReplicationPeerConfig(ID_SECOND, rpc).join();
+      admin.updateReplicationPeerConfig(ID_SECOND, rpcBuilder.build()).join();
     admin.enableTableReplication(tableName2).join();
     assertFalse("Table should not be created if user has set table cfs explicitly for the " +
       "peer and this is not part of that collection",
admin2.tableExists(tableName2).get());

    // Add tableName2 to replication peer config, too
    tableCfs.put(tableName2, null);
-    rpc.setTableCFsMap(tableCfs);
-    admin.updateReplicationPeerConfig(ID_SECOND, rpc).join();
+    rpcBuilder.setTableCFsMap(tableCfs);
+    admin.updateReplicationPeerConfig(ID_SECOND, rpcBuilder.build()).join();
    admin.enableTableReplication(tableName2).join();
    assertTrue(
      "Table should be created if user has explicitly added table into table cfs collection",
      admin2.tableExists(tableName2).get());
  } finally {
-    rpc.setTableCFsMap(null);
-    rpc.setReplicateAllUserTables(true);
-    admin.updateReplicationPeerConfig(ID_SECOND, rpc).join();
+    rpcBuilder.setTableCFsMap(null).setReplicateAllUserTables(true);
+    admin.updateReplicationPeerConfig(ID_SECOND, rpcBuilder.build()).join();
  }
 }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index 491612c6be95..99180ec8bad1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -395,8 +395,8 @@ public void testReplicaAndReplication() throws Exception {
   try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
     Admin admin = connection.getAdmin()) {
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(HTU2.getClusterKey());
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(HTU2.getClusterKey()).build();
     admin.addReplicationPeer("2", rpc);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
index b2e0e6d4860e..9baa600ca10b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
@@ -623,7 +623,8 @@ private void addPeer(String id, int masterClusterNumber,
   try (Connection conn = ConnectionFactory.createConnection(configurations[masterClusterNumber]);
     Admin admin = conn.getAdmin()) {
     admin.addReplicationPeer(id,
-      new ReplicationPeerConfig().setClusterKey(utilities[slaveClusterNumber].getClusterKey()));
+      ReplicationPeerConfig.newBuilder().
+ setClusterKey(utilities[slaveClusterNumber].getClusterKey()).build()); } } @@ -633,9 +634,10 @@ private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber, Admin admin = conn.getAdmin()) { admin.addReplicationPeer( id, - new ReplicationPeerConfig().setClusterKey(utilities[slaveClusterNumber].getClusterKey()) - .setReplicateAllUserTables(false) - .setTableCFsMap(ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCfs))); + ReplicationPeerConfig.newBuilder() + .setClusterKey(utilities[slaveClusterNumber].getClusterKey()) + .setReplicateAllUserTables(false) + .setTableCFsMap(ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCfs)).build()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java index 322db2e9e3ba..b3e4a1f4f5f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java @@ -145,9 +145,9 @@ public void testMultiSlaveReplication() throws Exception { Table htable2 = utility2.getConnection().getTable(tableName); Table htable3 = utility3.getConnection().getTable(tableName); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(utility2.getClusterKey()); - admin1.addReplicationPeer("1", rpc); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()); + admin1.addReplicationPeer("1", rpcBuilder.build()); // put "row" and wait 'til it got around, then delete putAndWait(row, famName, htable1, htable2); @@ -163,9 +163,8 @@ public void testMultiSlaveReplication() throws Exception { // after the log was rolled put a new row putAndWait(row3, famName, htable1, htable2); - rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(utility3.getClusterKey()); - admin1.addReplicationPeer("2", rpc); + rpcBuilder.setClusterKey(utility3.getClusterKey()); + admin1.addReplicationPeer("2", rpcBuilder.build()); // put a row, check it was replicated to all clusters putAndWait(row1, famName, htable1, htable2, htable3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java index 5a6ac0c48745..b972c5f3cf3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java @@ -142,8 +142,10 @@ public String explainFailure() throws Exception { public void testCustomReplicationEndpoint() throws Exception { // test installing a custom replication endpoint other than the default one. 
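// (Editorial note: the class name handed to setReplicationEndpointImpl(...) is
// instantiated reflectively on the region servers, which is why, as assumed here, the
// endpoint needs to be public with a no-arg constructor; the Waiter.waitFor call just
// below is what verifies the endpoint really was constructed and started.)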
hbaseAdmin.addReplicationPeer("testCustomReplicationEndpoint", - new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName())); + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()) + .build()); // check whether the class has been constructed and started Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { @@ -184,8 +186,10 @@ public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception { int peerCount = hbaseAdmin.listReplicationPeers().size(); final String id = "testReplicationEndpointReturnsFalseOnReplicate"; hbaseAdmin.addReplicationPeer(id, - new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName())); + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()) + .build()); // This test is flakey and then there is so much stuff flying around in here its, hard to // debug. Peer needs to be up for the edit to make it across. This wait on // peer count seems to be a hack that has us not progress till peer is up. @@ -236,8 +240,10 @@ public void testInterClusterReplication() throws Exception { } hbaseAdmin.addReplicationPeer(id, - new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2)) - .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName())); + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2)) + .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()) + .build()); final int numEdits = totEdits; Waiter.waitFor(CONF1, 30000, new Waiter.ExplainingPredicate() { @@ -260,13 +266,15 @@ public String explainFailure() throws Exception { @Test public void testWALEntryFilterFromReplicationEndpoint() throws Exception { - ReplicationPeerConfig rpc = - new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()); - // test that we can create mutliple WALFilters reflectively - rpc.getConfiguration().put(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, - EverythingPassesWALEntryFilter.class.getName() + "," + - EverythingPassesWALEntryFilterSubclass.class.getName()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) + // test that we can create mutliple WALFilters reflectively + .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, + EverythingPassesWALEntryFilter.class.getName() + "," + + EverythingPassesWALEntryFilterSubclass.class.getName()) + .build(); + hbaseAdmin.addReplicationPeer("testWALEntryFilterFromReplicationEndpoint", rpc); // now replicate some data. 
try (Connection connection = ConnectionFactory.createConnection(CONF1)) {
@@ -290,23 +298,25 @@ public boolean evaluate() throws Exception {
 @Test(expected = IOException.class)
 public void testWALEntryFilterAddValidation() throws Exception {
-    ReplicationPeerConfig rpc =
-      new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
-        .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName());
-    // test that we can create multiple WALFilters reflectively
-    rpc.getConfiguration().put(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
-      "IAmNotARealWalEntryFilter");
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+      .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName())
+      // test that we can create multiple WALFilters reflectively
+      .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
+        "IAmNotARealWalEntryFilter")
+      .build();
   hbaseAdmin.addReplicationPeer("testWALEntryFilterAddValidation", rpc);
 }

 @Test(expected = IOException.class)
 public void testWALEntryFilterUpdateValidation() throws Exception {
-    ReplicationPeerConfig rpc =
-      new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
-        .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName());
-    // test that we can create multiple WALFilters reflectively
-    rpc.getConfiguration().put(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
-      "IAmNotARealWalEntryFilter");
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1))
+      .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName())
+      // test that we can create multiple WALFilters reflectively
+      .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY,
+        "IAmNotARealWalEntryFilter")
+      .build();
   hbaseAdmin.updateReplicationPeerConfig("testWALEntryFilterUpdateValidation", rpc);
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index d416e09f2554..d61966f70a78 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -143,8 +143,8 @@ public static void setUpBeforeClass() throws Exception {
   connection1 = ConnectionFactory.createConnection(conf1);
   replicationAdmin = connection1.getAdmin();
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(utility2.getClusterKey());
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(utility2.getClusterKey()).build();
   replicationAdmin.addReplicationPeer("2", rpc);
   TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
index 1538fa360093..f528bdaad097 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
@@ -101,8
+101,8 @@ public static void setUpBeforeClass() throws Exception { utility2.setZkCluster(miniZK); new ZKWatcher(conf2, "cluster2", null, true); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(utility2.getClusterKey()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(utility2.getClusterKey()).build(); utility1.startMiniCluster(); utility2.startMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java index bfdbb8864726..ce47f0b71481 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java @@ -71,8 +71,8 @@ public void testReplicatorBatching() throws Exception { // Replace the peer set up for us by the base class with a wrapper for this test hbaseAdmin.addReplicationPeer("testReplicatorBatching", - new ReplicationPeerConfig().setClusterKey(UTIL2.getClusterKey()) - .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName())); + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()).build()); ReplicationEndpointForTest.setBatchCount(0); ReplicationEndpointForTest.setEntriesCount(0); @@ -120,8 +120,9 @@ public void testReplicatorWithErrors() throws Exception { // Replace the peer set up for us by the base class with a wrapper for this test hbaseAdmin.addReplicationPeer("testReplicatorWithErrors", - new ReplicationPeerConfig().setClusterKey(UTIL2.getClusterKey()) - .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName())); + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName()) + .build()); FailureInjectingReplicationEndpointForTest.setBatchCount(0); FailureInjectingReplicationEndpointForTest.setEntriesCount(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 17276173ec70..905cb48fe77c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -3056,7 +3056,8 @@ public void testUpdateReplicationPeerConfig() throws Exception { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preUpdateReplicationPeerConfig( - ObserverContextImpl.createAndPrepare(CP_ENV), "test", new ReplicationPeerConfig()); + ObserverContextImpl.createAndPrepare(CP_ENV), "test", + ReplicationPeerConfig.newBuilder().build()); return null; } }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java index 10b8cf56106c..31f219c36c12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java @@ -137,8 
+137,9 @@ public void setup() throws Exception { TEST_UTIL1.startMiniCluster(1); admin = TEST_UTIL.getAdmin(); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(TEST_UTIL1.getClusterKey()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(TEST_UTIL1.getClusterKey()) + .build(); admin.addReplicationPeer("2", rpc); TableDescriptor tableDescriptor = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index 012c9aad5676..b843f6e3bff4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -190,8 +190,8 @@ public void setup() throws Exception { TEST_UTIL1.startMiniCluster(1); admin = TEST_UTIL.getAdmin(); - ReplicationPeerConfig rpc = new ReplicationPeerConfig(); - rpc.setClusterKey(TEST_UTIL1.getClusterKey()); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(TEST_UTIL1.getClusterKey()).build(); admin.addReplicationPeer("2", rpc); Admin hBaseAdmin = TEST_UTIL.getAdmin(); From ebebf28d9f6aadb0ad6c17920c7a112e53e3fba7 Mon Sep 17 00:00:00 2001 From: Aman Poonia Date: Tue, 26 Jan 2021 11:55:12 +0530 Subject: [PATCH 659/769] HBASE-25523 Region normalizer chore thread is getting killed (#2903) Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani --- .../master/normalizer/SimpleRegionNormalizer.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 1675e049d77d..61bc922c5ac4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Size; import org.apache.hadoop.hbase.TableName; @@ -225,8 +226,16 @@ public List computePlansForTable(final TableName table) { private long getRegionSizeMB(RegionInfo hri) { ServerName sn = masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); - RegionMetrics regionLoad = - masterServices.getServerManager().getLoad(sn).getRegionMetrics().get(hri.getRegionName()); + if (sn == null) { + LOG.debug("{} region was not found on any Server", hri.getRegionNameAsString()); + return -1; + } + ServerMetrics serverMetrics = masterServices.getServerManager().getLoad(sn); + if (serverMetrics == null) { + LOG.debug("server {} was not found in ServerManager", sn.getServerName()); + return -1; + } + RegionMetrics regionLoad = serverMetrics.getRegionMetrics().get(hri.getRegionName()); if (regionLoad == null) { LOG.debug("{} was not found in RegionsLoad", hri.getRegionNameAsString()); return -1; From 64847926ee4fcb6efea80c67abb8ad1f23bf18f9 Mon Sep 17 00:00:00 2001 From: Mallikarjun Date: Tue, 26 Jan 2021 12:08:55 +0530 Subject: [PATCH 
660/769] HBASE-25501 BugFix: Unused backup bandwidth and workers parameter (#2886) Signed-off-by: Viraj Jasani --- .../backup/impl/FullTableBackupClient.java | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index c0103f5db31f..5bf1373a6e53 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -25,6 +25,7 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -87,11 +88,21 @@ protected void snapshotCopy(BackupInfo backupInfo) throws Exception { // calculate the real files' size for the percentage in the future. // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); int res; - String[] args = new String[4]; - args[0] = "-snapshot"; - args[1] = backupInfo.getSnapshotName(table); - args[2] = "-copy-to"; - args[3] = backupInfo.getTableBackupDir(table); + ArrayList argsList = new ArrayList<>(); + argsList.add("-snapshot"); + argsList.add(backupInfo.getSnapshotName(table)); + argsList.add("-copy-to"); + argsList.add(backupInfo.getTableBackupDir(table)); + if (backupInfo.getBandwidth() > -1) { + argsList.add("-bandwidth"); + argsList.add(String.valueOf(backupInfo.getBandwidth())); + } + if (backupInfo.getWorkers() > -1) { + argsList.add("-mappers"); + argsList.add(String.valueOf(backupInfo.getWorkers())); + } + + String[] args = argsList.toArray(new String[0]); String jobname = "Full-Backup_" + backupInfo.getBackupId() + "_" + table.getNameAsString(); if (LOG.isDebugEnabled()) { From 458f9e063654f1d5d6c0f64654628694bdf19760 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Tue, 26 Jan 2021 11:21:02 -0800 Subject: [PATCH 661/769] HBASE-25532 Add 2.4.1 to the downloads page Signed-off-by: Andrew Purtell --- src/site/xdoc/downloads.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml index a4123ba5d559..72cbeb42e3e7 100644 --- a/src/site/xdoc/downloads.xml +++ b/src/site/xdoc/downloads.xml @@ -45,24 +45,24 @@ under the License. - 2.4.0 + 2.4.1 - 2020/12/15 + 2021/01/26 - 2.4.0 vs 2.3.0 + 2.4.1 vs 2.4.0 - Changes + Changes - Release Notes + Release Notes - src (sha512 asc)
    - bin (sha512 asc)
    - client-bin (sha512 asc) + src (sha512 asc)
    + bin (sha512 asc)
    + client-bin (sha512 asc) From 73bd4db57ae8a678b8e1a4dfdfd7404541520f42 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Tue, 26 Jan 2021 16:14:53 -0800 Subject: [PATCH 662/769] HBASE-25531 Minor improvement to Profiler Servlet doc (#2905) Signed-off-by: Sean Busbey --- src/main/asciidoc/_chapters/profiler.adoc | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/main/asciidoc/_chapters/profiler.adoc b/src/main/asciidoc/_chapters/profiler.adoc index 522cc7deed6d..9c9911ce7519 100644 --- a/src/main/asciidoc/_chapters/profiler.adoc +++ b/src/main/asciidoc/_chapters/profiler.adoc @@ -29,20 +29,26 @@ == Background -HBASE-21926 introduced a new servlet that supports integrated profiling via async-profiler. +https://issues.apache.org/jira/browse/HBASE-21926[HBASE-21926] introduced a new servlet that +supports integrated, on-demand profiling via the +https://github.com/jvm-profiling-tools/async-profiler[Async Profiler] project. == Prerequisites -Go to https://github.com/jvm-profiling-tools/async-profiler, download a release appropriate for your platform, and install on every cluster host. -If 4.6 or later linux, be sure to set proc variables as per 'Basic Usage' section in the -Async Profiler Home Page -(Not doing this will draw you diagrams with no content). +Go to the https://github.com/jvm-profiling-tools/async-profiler[Async Profiler Home Page], download +a release appropriate for your platform, and install on every cluster host. If running a Linux +kernel v4.6 or later, be sure to set proc variables as per the +https://github.com/jvm-profiling-tools/async-profiler#basic-usage[Basic Usage] section. Not doing +so will result in flame graphs that contain no content. -Set `ASYNC_PROFILER_HOME` in the environment (put it in hbase-env.sh) to the root directory of the async-profiler install location, or pass it on the HBase daemon's command line as a system property as `-Dasync.profiler.home=/path/to/async-profiler`. +Set `ASYNC_PROFILER_HOME` in the environment (put it in hbase-env.sh) to the root directory of the +async-profiler install location, or pass it on the HBase daemon's command line as a system property +as `-Dasync.profiler.home=/path/to/async-profiler`. == Usage -Once the prerequisites are satisfied, access to async-profiler is available by way of the HBase UI or direct interaction with the infoserver. +Once the prerequisites are satisfied, access to async-profiler is available by way of the HBase UI +or direct interaction with the infoserver. 
Examples: From 13ee9693ad9a7ccb5bce2b0c2ee6a609b0765951 Mon Sep 17 00:00:00 2001 From: Bo Cui Date: Thu, 28 Jan 2021 22:55:05 +0800 Subject: [PATCH 663/769] HBASE-25506 ServerManager#startChore affects MTTR of HMaster (#2889) Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/master/ServerManager.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index f91f04000cd1..7bbfd0bb55d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -921,8 +921,13 @@ public boolean isClusterShutdown() { public void startChore() { Configuration c = master.getConfiguration(); if (persistFlushedSequenceId) { - // when reach here, RegionStates should loaded, firstly, we call remove deleted regions - removeDeletedRegionFromLoadedFlushedSequenceIds(); + new Thread(() -> { + // after AM#loadMeta, RegionStates should be loaded, and some regions are + // deleted by drop/split/merge during removeDeletedRegionFromLoadedFlushedSequenceIds, + // but these deleted regions are not added back to RegionStates, + // so we can safely remove deleted regions. + removeDeletedRegionFromLoadedFlushedSequenceIds(); + }, "RemoveDeletedRegionSyncThread").start(); int flushPeriod = c.getInt(FLUSHEDSEQUENCEID_FLUSHER_INTERVAL, FLUSHEDSEQUENCEID_FLUSHER_INTERVAL_DEFAULT); flushedSeqIdFlusher = new FlushedSequenceIdFlusher( From c58ac71e36b35297724fea37739d0e8b45d71847 Mon Sep 17 00:00:00 2001 From: Bharath Vissapragada Date: Thu, 28 Jan 2021 09:33:35 -0800 Subject: [PATCH 664/769] HBASE-25528: Dedicated merge dispatch threadpool on master (#2904) Adds "hbase.master.executor.merge.dispatch.threads" and defaults to 2. Also adds additional logging that includes the number of split plans and merge plans computed for each normalizer run. Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani --- .../java/org/apache/hadoop/hbase/HConstants.java | 7 +++++++ .../org/apache/hadoop/hbase/executor/EventType.java | 2 +- .../apache/hadoop/hbase/executor/ExecutorType.java | 1 + .../org/apache/hadoop/hbase/master/HMaster.java | 3 +++ .../master/normalizer/SimpleRegionNormalizer.java | 13 ++++++++++--- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index d31cadd85299..48fa00caaa14 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1605,6 +1605,13 @@ public enum OperationStatusCode { "hbase.master.executor.serverops.threads"; public static final int MASTER_SERVER_OPERATIONS_THREADS_DEFAULT = 5; + /** + * Number of threads used to dispatch merge operations to the regionservers. 
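+   * Defaults to {@value #MASTER_MERGE_DISPATCH_THREADS_DEFAULT}.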
+ */ + public static final String MASTER_MERGE_DISPATCH_THREADS = + "hbase.master.executor.merge.dispatch.threads"; + public static final int MASTER_MERGE_DISPATCH_THREADS_DEFAULT = 2; + public static final String MASTER_META_SERVER_OPERATIONS_THREADS = "hbase.master.executor.meta.serverops.threads"; public static final int MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT = 5; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index a67447940b9d..600c96cc0267 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -152,7 +152,7 @@ public enum EventType { * C_M_MERGE_REGION
    * Client asking Master to merge regions. */ - C_M_MERGE_REGION (30, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_MERGE_REGION (30, ExecutorType.MASTER_MERGE_OPERATIONS), /** * Messages originating from Client to Master.
    * C_M_DELETE_TABLE
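(For illustration only, a minimal sketch of how the new pool could be sized by an operator; the constant and its default of 2 are the ones this patch adds to HConstants, while the value of 4 is an arbitrary example:)

    Configuration conf = HBaseConfiguration.create();
    // Dedicated dispatch pool for C_M_MERGE_REGION events; the patch defaults this to 2 threads.
    conf.setInt(HConstants.MASTER_MERGE_DISPATCH_THREADS, 4);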
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index d06bd54484d7..36958c518a68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -35,6 +35,7 @@ public enum ExecutorType { MASTER_META_SERVER_OPERATIONS (6), M_LOG_REPLAY_OPS (7), MASTER_SNAPSHOT_OPERATIONS (8), + MASTER_MERGE_OPERATIONS (9), // RegionServer executor services RS_OPEN_REGION (20), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 94f3bf2bfda7..9911f014d639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1324,6 +1324,9 @@ private void startServiceThreads() throws IOException { HConstants.MASTER_LOG_REPLAY_OPS_THREADS, HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT)); this.executorService.startExecutorService(ExecutorType.MASTER_SNAPSHOT_OPERATIONS, conf.getInt( SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT)); + this.executorService.startExecutorService(ExecutorType.MASTER_MERGE_OPERATIONS, conf.getInt( + HConstants.MASTER_MERGE_DISPATCH_THREADS, + HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT)); // We depend on there being only one instance of this executor running // at a time. To do concurrency, would need fencing of enable/disable of diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 61bc922c5ac4..52455686895f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -209,14 +209,21 @@ public List computePlansForTable(final TableName table) { ctx.getTableRegions().size()); final List plans = new ArrayList<>(); + int splitPlansCount = 0; if (proceedWithSplitPlanning) { - plans.addAll(computeSplitNormalizationPlans(ctx)); + List splitPlans = computeSplitNormalizationPlans(ctx); + splitPlansCount = splitPlans.size(); + plans.addAll(splitPlans); } + int mergePlansCount = 0; if (proceedWithMergePlanning) { - plans.addAll(computeMergeNormalizationPlans(ctx)); + List mergePlans = computeMergeNormalizationPlans(ctx); + mergePlansCount = mergePlans.size(); + plans.addAll(mergePlans); } - LOG.debug("Computed {} normalization plans for table {}", plans.size(), table); + LOG.debug("Computed normalization plans for table {}. 
Total plans: {}, split plans: {}, " + + "merge plans: {}", table, plans.size(), splitPlansCount, mergePlansCount); return plans; } From 767b87dd6de618b251bb981ae88f7dbf25f52ba0 Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Fri, 29 Jan 2021 04:17:30 -0800 Subject: [PATCH 665/769] [HBASE-25536] Remove 0 length wal file from logQueue if it belongs to old sources (#2908) Signed-off-by: Wellington Chevreuil Signed-off-by: Geoffrey Jacoby Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani --- .../ReplicationSourceWALReader.java | 4 ++- .../regionserver/TestWALEntryStream.java | 30 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index a6d87870b495..be262a6d9504 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -247,8 +247,10 @@ private void handleEmptyWALEntryBatch() throws InterruptedException { // (highly likely we've closed the current log), we've hit the max retries, and autorecovery is // enabled, then dump the log private void handleEofException(IOException e) { + // Dump the log even if logQueue size is 1 if the source is from recovered Source + // since we don't add current log to recovered source queue so it is safe to remove. if ((e instanceof EOFException || e.getCause() instanceof EOFException) && - logQueue.size() > 1 && this.eofAutoRecovery) { + (source.isRecovered() || logQueue.size() > 1) && this.eofAutoRecovery) { try { if (fs.getFileStatus(logQueue.peek()).getLen() == 0) { LOG.warn("Forcing removal of 0 length log in queue: " + logQueue.peek()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java index 63e7a8b90496..1db9c175e922 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java @@ -42,6 +42,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -652,4 +653,33 @@ public void testReadBeyondCommittedLength() throws IOException, InterruptedExcep assertFalse(entryStream.hasNext()); } } + + /* + Test removal of 0 length log from logQueue if the source is a recovered source and + size of logQueue is only 1. + */ + @Test + public void testEOFExceptionForRecoveredQueue() throws Exception { + PriorityBlockingQueue queue = new PriorityBlockingQueue<>(); + // Create a 0 length log. + Path emptyLog = new Path("emptyLog"); + FSDataOutputStream fsdos = fs.create(emptyLog); + fsdos.close(); + assertEquals(0, fs.getFileStatus(emptyLog).getLen()); + queue.add(emptyLog); + + Configuration conf = new Configuration(CONF); + // Override the max retries multiplier to fail fast. 
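+    // (a multiplier of 1 caps the retry backoff at a single sleep interval, so the
+    // reader reaches its EOF handling quickly)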
+ conf.setInt("replication.source.maxretriesmultiplier", 1); + conf.setBoolean("replication.source.eof.autorecovery", true); + // Create a reader thread with source as recovered source. + ReplicationSource source = mockReplicationSource(true, conf); + when(source.isPeerEnabled()).thenReturn(true); + ReplicationSourceWALReader reader = + new ReplicationSourceWALReader(fs, conf, queue, 0, getDummyFilter(), source); + reader.run(); + // ReplicationSourceWALReaderThread#handleEofException method will + // remove empty log from logQueue. + assertEquals(0, queue.size()); + } } From e387028d59c5a2428bc2b3870d2d9566f9f3f8e2 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Fri, 29 Jan 2021 23:27:31 +0800 Subject: [PATCH 666/769] HBASE-25533 The metadata of the table and family should not be an empty string (#2906) Signed-off-by: Viraj Jasani Signed-off-by: Geoffrey Jacoby --- .../client/ColumnFamilyDescriptorBuilder.java | 4 +- .../hbase/client/TableDescriptorBuilder.java | 6 +-- .../TestColumnFamilyDescriptorBuilder.java | 19 ++++++++ .../client/TestTableDescriptorBuilder.java | 18 +++++++ hbase-shell/src/test/ruby/hbase/admin_test.rb | 47 +++++++++++++++++++ 5 files changed, 89 insertions(+), 5 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 9a47cb52fa95..7afc3872b465 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -677,7 +677,7 @@ private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { * @return this (for chained invocation) */ private ModifyableColumnFamilyDescriptor setValue(Bytes key, Bytes value) { - if (value == null) { + if (value == null || value.getLength() == 0) { values.remove(key); } else { values.put(key, value); @@ -1228,7 +1228,7 @@ public Map getConfiguration() { * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setConfiguration(String key, String value) { - if (value == null) { + if (value == null || value.length() == 0) { configuration.remove(key); } else { configuration.put(key, value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index d98386817148..2581ccea758b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -701,7 +701,7 @@ public ModifyableTableDescriptor setValue(String key, String value) { toBytesOrNull(value, Bytes::toBytes)); } - /* + /** * @param key The key. * @param value The value. If null, removes the setting. */ @@ -710,14 +710,14 @@ private ModifyableTableDescriptor setValue(final Bytes key, return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } - /* + /** * Setter for storing metadata as a (key, value) pair in {@link #values} map * * @param key The key. * @param value The value. If null, removes the setting. 
*/ public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) { - if (value == null) { + if (value == null || value.getLength() == 0) { values.remove(key); } else { values.put(key, value); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index 557d2f8dfb6e..7528d24705cf 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; @@ -210,6 +211,24 @@ public void testDefaultBuilder() { KeepDeletedCells.FALSE.toString()); assertEquals(defaultValueMap.get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING), DataBlockEncoding.NONE.toString()); + } + @Test + public void testSetEmptyValue() { + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY); + String testConf = "TestConfiguration"; + String testValue = "TestValue"; + // test set value + builder.setValue(testValue, "2"); + assertEquals("2", Bytes.toString(builder.build().getValue(Bytes.toBytes(testValue)))); + builder.setValue(testValue, ""); + assertNull(builder.build().getValue(Bytes.toBytes(testValue))); + + // test set configuration + builder.setConfiguration(testConf, "1"); + assertEquals("1", builder.build().getConfigurationValue(testConf)); + builder.setConfiguration(testConf, ""); + assertNull(builder.build().getConfigurationValue(testConf)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 43824afe8107..05a0b31d1e8a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -369,4 +369,22 @@ public void testGetSetRegionServerGroup() { htd = TableDescriptorBuilder.newBuilder(htd).setRegionServerGroup(null).build(); assertNull(htd.getValue(RSGroupInfo.TABLE_DESC_PROP_GROUP)); } + + @Test + public void testSetEmptyValue() { + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + String testValue = "TestValue"; + // test setValue + builder.setValue(testValue, "2"); + assertEquals("2", builder.build().getValue(testValue)); + builder.setValue(testValue, ""); + assertNull(builder.build().getValue(Bytes.toBytes(testValue))); + + // test setFlushPolicyClassName + builder.setFlushPolicyClassName("class"); + assertEquals("class", builder.build().getFlushPolicyClassName()); + builder.setFlushPolicyClassName(""); + assertNull(builder.build().getFlushPolicyClassName()); + } } diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index 64a4a8b425c6..309624ae1808 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -1013,6 +1013,21 @@ def teardown assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) end + define_test "alter should be able to remove a list of 
table attributes when value is empty" do + drop_test_table(@test_name) + + key_1 = "TestAttr1" + key_2 = "TestAttr2" + command(:create, @test_name, { NAME => 'i'}, METADATA => { key_1 => 1, key_2 => 2 }) + + # eval() is used to convert a string to regex + assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + + command(:alter, @test_name, METADATA => { key_1 => '', key_2 => '' }) + assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + end define_test "alter should raise error trying to remove nonexistent attributes" do drop_test_table(@test_name) @@ -1064,6 +1079,38 @@ def teardown assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) end + define_test "alter should be able to remove a list of table configuration when value is empty" do + drop_test_table(@test_name) + + key_1 = "TestConf1" + key_2 = "TestConf2" + command(:create, @test_name, { NAME => 'i'}, CONFIGURATION => { key_1 => 1, key_2 => 2 }) + + # eval() is used to convert a string to regex + assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + + command(:alter, @test_name, CONFIGURATION => { key_1 => '', key_2 => '' }) + assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + end + + define_test "alter should be able to remove a list of column family configuration when value is empty" do + drop_test_table(@test_name) + + key_1 = "TestConf1" + key_2 = "TestConf2" + command(:create, @test_name, { NAME => 'i', CONFIGURATION => { key_1 => 1, key_2 => 2 }}) + + # eval() is used to convert a string to regex + assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + + command(:alter, @test_name, { NAME => 'i', CONFIGURATION => { key_1 => '', key_2 => '' }}) + assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) + assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) + end + define_test "alter should raise error trying to remove nonexistent configurations" do drop_test_table(@test_name) create_test_table(@test_name) From 56a70f9ad02f93fe1af4f80b13f4a7d01f0479dd Mon Sep 17 00:00:00 2001 From: SWH12 <34267571+SWH12@users.noreply.github.com> Date: Sun, 31 Jan 2021 06:13:45 +0800 Subject: [PATCH 667/769] HBASE-25498 Add a comment when configuring HTTPS (#2913) Incomplete configuration steps when using Secure HTTP (HTTPS) for the Web UI --- src/main/asciidoc/_chapters/security.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc index 7953b75b6a37..4678072145b1 100644 --- a/src/main/asciidoc/_chapters/security.adoc +++ b/src/main/asciidoc/_chapters/security.adoc @@ -44,7 +44,7 @@ HBase provides mechanisms to secure various components and aspects of HBase and === Using Secure HTTP (HTTPS) for the Web UI A default HBase install uses insecure HTTP connections for Web UIs for the master and region servers. -To enable secure HTTP (HTTPS) connections instead, set `hbase.ssl.enabled` to `true` in _hbase-site.xml_. +To enable secure HTTP (HTTPS) connections instead, set `hbase.ssl.enabled` to `true` in _hbase-site.xml_(Please prepare SSL certificate and ssl configuration file in advance). 
This does not change the port used by the Web UI.
To change the port for the web UI for a given HBase component, configure that port's setting in hbase-site.xml.
These settings are:

From e8de13be208841507008051e6acc07c7895fb85c Mon Sep 17 00:00:00 2001
From: Pankaj
Date: Mon, 1 Feb 2021 10:37:57 +0530
Subject: [PATCH 668/769] HBASE-24900 Make retain assignment configurable during SCP (#2313)

Retain assignment will be useful in non-cloud scenarios where RegionServer and Datanode are deployed on the same machine, as it avoids remote reads.

Signed-off-by: Guanghao Zhang
Signed-off-by: Anoop Sam John
---
 .../TransitRegionStateProcedure.java          |   9 +-
 .../procedure/ServerCrashProcedure.java       |  23 +-
 .../master/TestRetainAssignmentOnRestart.java | 240 ++++++++++++++++++
 ...tainAssignmentOnRestartSplitWithoutZk.java |  38 +++
 4 files changed, 300 insertions(+), 10 deletions(-)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
index 56e3215d3ab2..8ca1ee482e81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
@@ -414,13 +414,8 @@ public void reportTransition(MasterProcedureEnv env, RegionStateNode regionNode,
   // Should be called with RegionStateNode locked
   public void serverCrashed(MasterProcedureEnv env, RegionStateNode regionNode,
-      ServerName serverName) throws IOException {
-    // force to assign to a new candidate server
-    // AssignmentManager#regionClosedAbnormally will set region location to null
-    // TODO: the forceNewPlan flag not be persistent so if master crash then the flag will be lost.
-    // But assign to old server is not big deal because it not effect correctness.
-    // See HBASE-23035 for more details.
-    forceNewPlan = true;
+      ServerName serverName, boolean forceNewPlan) throws IOException {
+    this.forceNewPlan = forceNewPlan;
     if (remoteProc != null) {
       // this means we are waiting for the sub procedure, so wake it up
       remoteProc.serverCrashed(env, regionNode, serverName);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 655974489f68..e7fba555c9cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -65,6 +65,21 @@ public class ServerCrashProcedure
 implements ServerProcedureInterface {
   private static final Logger LOG = LoggerFactory.getLogger(ServerCrashProcedure.class);
+  /**
+   * Configuration parameter to enable/disable the retain region assignment during
+   * ServerCrashProcedure.
+   *

+   * By default retain assignment is disabled, which makes failover faster and improves
+   * availability; this is useful in cloud scenarios where region block locality is not important.
+   * Enable it when RegionServers are deployed on the same hosts where Datanodes are running; this
+   * will improve read performance due to local reads.
+   *

    + * see HBASE-24900 for more details. + */ + public static final String MASTER_SCP_RETAIN_ASSIGNMENT = "hbase.master.scp.retain.assignment"; + /** Default value of {@link #MASTER_SCP_RETAIN_ASSIGNMENT} */ + public static final boolean DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT = false; + /** * Name of the crashed server to process. */ @@ -486,6 +501,8 @@ protected boolean isMatchingRegionLocation(RegionStateNode rsn) { */ private void assignRegions(MasterProcedureEnv env, List regions) throws IOException { AssignmentManager am = env.getMasterServices().getAssignmentManager(); + boolean retainAssignment = env.getMasterConfiguration().getBoolean(MASTER_SCP_RETAIN_ASSIGNMENT, + DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT); for (RegionInfo region : regions) { RegionStateNode regionNode = am.getRegionStates().getOrCreateRegionStateNode(region); regionNode.lock(); @@ -512,7 +529,8 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr } if (regionNode.getProcedure() != null) { LOG.info("{} found RIT {}; {}", this, regionNode.getProcedure(), regionNode); - regionNode.getProcedure().serverCrashed(env, regionNode, getServerName()); + regionNode.getProcedure().serverCrashed(env, regionNode, getServerName(), + !retainAssignment); continue; } if (env.getMasterServices().getTableStateManager() @@ -531,9 +549,8 @@ private void assignRegions(MasterProcedureEnv env, List regions) thr LOG.warn("Found table disabled for region {}, procDetails: {}", regionNode, this); continue; } - // force to assign to a new candidate server, see HBASE-23035 for more details. TransitRegionStateProcedure proc = - TransitRegionStateProcedure.assign(env, region, true, null); + TransitRegionStateProcedure.assign(env, region, !retainAssignment, null); regionNode.setProcedure(proc); addChildProcedure(proc); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java new file mode 100644 index 000000000000..ee4da743d215 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java @@ -0,0 +1,240 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestRetainAssignmentOnRestart extends AbstractTestRestartCluster { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRetainAssignmentOnRestart.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestRetainAssignmentOnRestart.class); + + private static int NUM_OF_RS = 3; + + @Override + protected boolean splitWALCoordinatedByZk() { + return true; + } + + /** + * This tests retaining assignments on a cluster restart + */ + @Test + public void testRetainAssignmentOnClusterRestart() throws Exception { + setupCluster(); + HMaster master = UTIL.getMiniHBaseCluster().getMaster(); + MiniHBaseCluster cluster = UTIL.getHBaseCluster(); + List threads = cluster.getLiveRegionServerThreads(); + assertEquals(NUM_OF_RS, threads.size()); + int[] rsPorts = new int[NUM_OF_RS]; + for (int i = 0; i < NUM_OF_RS; i++) { + rsPorts[i] = threads.get(i).getRegionServer().getServerName().getPort(); + } + + // We don't have to use SnapshotOfRegionAssignmentFromMeta. 
We use it here because AM used to + // use it to load all user region placements + SnapshotOfRegionAssignmentFromMeta snapshot = + new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); + snapshot.initialize(); + Map regionToRegionServerMap = snapshot.getRegionToRegionServerMap(); + for (ServerName serverName : regionToRegionServerMap.values()) { + boolean found = false; // Test only, no need to optimize + for (int k = 0; k < NUM_OF_RS && !found; k++) { + found = serverName.getPort() == rsPorts[k]; + } + assertTrue(found); + } + + LOG.info("\n\nShutting down HBase cluster"); + cluster.stopMaster(0); + cluster.shutdown(); + cluster.waitUntilShutDown(); + + LOG.info("\n\nSleeping a bit"); + Thread.sleep(2000); + + LOG.info("\n\nStarting cluster the second time with the same ports"); + cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 3); + master = cluster.startMaster().getMaster(); + for (int i = 0; i < NUM_OF_RS; i++) { + cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, rsPorts[i]); + cluster.startRegionServer(); + } + + ensureServersWithSamePort(master, rsPorts); + + // Wait till master is initialized and all regions are assigned + for (TableName TABLE : TABLES) { + UTIL.waitTableAvailable(TABLE); + } + UTIL.waitUntilNoRegionsInTransition(60000); + + snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); + snapshot.initialize(); + Map newRegionToRegionServerMap = snapshot.getRegionToRegionServerMap(); + assertEquals(regionToRegionServerMap.size(), newRegionToRegionServerMap.size()); + for (Map.Entry entry : newRegionToRegionServerMap.entrySet()) { + ServerName oldServer = regionToRegionServerMap.get(entry.getKey()); + ServerName currentServer = entry.getValue(); + LOG.info( + "Key=" + entry.getKey() + " oldServer=" + oldServer + ", currentServer=" + currentServer); + assertEquals(entry.getKey().toString(), oldServer.getAddress(), currentServer.getAddress()); + assertNotEquals(oldServer.getStartcode(), currentServer.getStartcode()); + } + } + + /** + * This tests retaining assignments on a single node restart + */ + @Test + public void testRetainAssignmentOnSingleRSRestart() throws Exception { + setupCluster(); + HMaster master = UTIL.getMiniHBaseCluster().getMaster(); + MiniHBaseCluster cluster = UTIL.getHBaseCluster(); + List threads = cluster.getLiveRegionServerThreads(); + assertEquals(NUM_OF_RS, threads.size()); + int[] rsPorts = new int[NUM_OF_RS]; + for (int i = 0; i < NUM_OF_RS; i++) { + rsPorts[i] = threads.get(i).getRegionServer().getServerName().getPort(); + } + + // We don't have to use SnapshotOfRegionAssignmentFromMeta. 
We use it here because AM used to + // use it to load all user region placements + SnapshotOfRegionAssignmentFromMeta snapshot = + new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); + snapshot.initialize(); + Map regionToRegionServerMap = snapshot.getRegionToRegionServerMap(); + for (ServerName serverName : regionToRegionServerMap.values()) { + boolean found = false; // Test only, no need to optimize + for (int k = 0; k < NUM_OF_RS && !found; k++) { + found = serverName.getPort() == rsPorts[k]; + } + assertTrue(found); + } + + // Server to be restarted + ServerName deadRS = threads.get(0).getRegionServer().getServerName(); + LOG.info("\n\nStopping HMaster and {} server", deadRS); + // Stopping master first so that region server SCP will not be initiated + cluster.stopMaster(0); + cluster.waitForMasterToStop(master.getServerName(), 5000); + cluster.stopRegionServer(deadRS); + + LOG.info("\n\nSleeping a bit"); + Thread.sleep(2000); + + LOG.info("\n\nStarting HMaster and region server {} second time with the same port", deadRS); + cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 3); + master = cluster.startMaster().getMaster(); + cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, deadRS.getPort()); + cluster.startRegionServer(); + + ensureServersWithSamePort(master, rsPorts); + + // Wait till master is initialized and all regions are assigned + for (TableName TABLE : TABLES) { + UTIL.waitTableAvailable(TABLE); + } + UTIL.waitUntilNoRegionsInTransition(60000); + + snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); + snapshot.initialize(); + Map newRegionToRegionServerMap = snapshot.getRegionToRegionServerMap(); + assertEquals(regionToRegionServerMap.size(), newRegionToRegionServerMap.size()); + for (Map.Entry entry : newRegionToRegionServerMap.entrySet()) { + ServerName oldServer = regionToRegionServerMap.get(entry.getKey()); + ServerName currentServer = entry.getValue(); + LOG.info( + "Key=" + entry.getKey() + " oldServer=" + oldServer + ", currentServer=" + currentServer); + assertEquals(entry.getKey().toString(), oldServer.getAddress(), currentServer.getAddress()); + + if (deadRS.getPort() == oldServer.getPort()) { + // Restarted RS start code wont be same + assertNotEquals(oldServer.getStartcode(), currentServer.getStartcode()); + } else { + assertEquals(oldServer.getStartcode(), currentServer.getStartcode()); + } + } + } + + private void setupCluster() throws Exception, IOException, InterruptedException { + // Set Zookeeper based connection registry since we will stop master and start a new master + // without populating the underlying config for the connection. 
+ UTIL.getConfiguration().set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + HConstants.ZK_CONNECTION_REGISTRY_CLASS); + // Enable retain assignment during ServerCrashProcedure + UTIL.getConfiguration().setBoolean(ServerCrashProcedure.MASTER_SCP_RETAIN_ASSIGNMENT, true); + UTIL.startMiniCluster(NUM_OF_RS); + + // Turn off balancer + UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false); + + LOG.info("\n\nCreating tables"); + for (TableName TABLE : TABLES) { + UTIL.createTable(TABLE, FAMILY); + } + for (TableName TABLE : TABLES) { + UTIL.waitTableEnabled(TABLE); + } + + UTIL.getMiniHBaseCluster().getMaster(); + UTIL.waitUntilNoRegionsInTransition(60000); + } + + private void ensureServersWithSamePort(HMaster master, int[] rsPorts) { + // Make sure live regionservers are on the same host/port + List localServers = master.getServerManager().getOnlineServersList(); + assertEquals(NUM_OF_RS, localServers.size()); + for (int i = 0; i < NUM_OF_RS; i++) { + boolean found = false; + for (ServerName serverName : localServers) { + if (serverName.getPort() == rsPorts[i]) { + found = true; + break; + } + } + assertTrue(found); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java new file mode 100644 index 000000000000..2cbb2c7e2f63 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestRetainAssignmentOnRestartSplitWithoutZk + extends TestRetainAssignmentOnRestart { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRetainAssignmentOnRestartSplitWithoutZk.class); + + @Override + protected boolean splitWALCoordinatedByZk() { + return false; + } +} From 3d3c66c78d4330bf4d709b716cac875c1cead45b Mon Sep 17 00:00:00 2001 From: robin7roy <57340134+robin7roy@users.noreply.github.com> Date: Wed, 3 Feb 2021 10:34:34 +0530 Subject: [PATCH 669/769] HBASE-25546 PerfTestCompactionPolicies is failing because of NPE (#2921) Adds mock for getRegionInfo which was missing earlier. 
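A minimal sketch of the stubbing in question (assuming Mockito's static mock/when imports, which the test already uses):

    HStore s = mock(HStore.class);
    // Without this stub, s.getRegionInfo() returns null and the compaction policy
    // under test dereferences it, which is the NPE being fixed here.
    when(s.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);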
Signed-off-by: Pankaj Kumar --- .../regionserver/compactions/PerfTestCompactionPolicies.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index 13c7a6bc1039..0c84507c9568 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; @@ -197,6 +198,7 @@ private HStore createMockStore() { HStore s = mock(HStore.class); when(s.getStoreFileTtl()).thenReturn(Long.MAX_VALUE); when(s.getBlockingFileCount()).thenReturn(7L); + when(s.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); return s; } From a4be96394004763e389389761f3f1910e28fc270 Mon Sep 17 00:00:00 2001 From: robin7roy <57340134+robin7roy@users.noreply.github.com> Date: Thu, 4 Feb 2021 20:55:40 +0530 Subject: [PATCH 670/769] HBASE-25546 PerfTestCompactionPolicies is failing because of NPE (addendum) (#2926) Added HBaseClassTestRule for PerfTestCompactionPolicies --- .../compactions/PerfTestCompactionPolicies.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index 0c84507c9568..e6b0499c738d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -28,6 +28,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.HStore; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -45,6 +47,10 @@ @RunWith(Parameterized.class) public class PerfTestCompactionPolicies extends MockStoreFileGenerator { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(PerfTestCompactionPolicies.class); + private final RatioBasedCompactionPolicy cp; private final StoreFileListGenerator generator; private final HStore store; From 59a9e23c00d0780fd16014e951e2b9451e322395 Mon Sep 17 00:00:00 2001 From: stack Date: Thu, 4 Feb 2021 11:27:06 -0800 Subject: [PATCH 671/769] HBASE-25546 PerfTestCompactionPolicies is failing because of NPE (addendum2); add class comment --- .../regionserver/compactions/PerfTestCompactionPolicies.java | 5 +++++ 1 file changed, 5 insertions(+) diff 
--git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index e6b0499c738d..314b96695b6d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -43,6 +43,11 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +/** + * This is not a unit test. It is not run as part of the general unit test suite. It is for + * comparing compaction policies. You must run it explicitly; + * e.g. mvn test -Dtest=PerfTestCompactionPolicies + */ @Category({RegionServerTests.class, MediumTests.class}) @RunWith(Parameterized.class) public class PerfTestCompactionPolicies extends MockStoreFileGenerator { From 63ca8c9a3da531cca77a3566d6d154ea090856b2 Mon Sep 17 00:00:00 2001 From: YutSean <33572832+YutSean@users.noreply.github.com> Date: Fri, 5 Feb 2021 15:37:34 +0800 Subject: [PATCH 672/769] HBASE-25543 When configuration hadoop.security.authorization is set to false, the system will still try to authorize an RPC and raise AccessDeniedException (#2919) Signed-off-by: Viraj Jasani Signed-off-by: Reid Chan --- .../src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java | 3 +++ .../java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index ca8593ee3d5d..b0e8b7d3d5d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -822,4 +822,7 @@ public void setNamedQueueRecorder(NamedQueueRecorder namedQueueRecorder) { this.namedQueueRecorder = namedQueueRecorder; } + protected boolean needAuthorization() { + return authorize; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 0226de4792c9..422003e1a6a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -449,7 +449,7 @@ public void processOneRpc(ByteBuff buf) throws IOException, } else { processConnectionHeader(buf); this.connectionHeaderRead = true; - if (!authorizeConnection()) { + if (rpcServer.needAuthorization() && !authorizeConnection()) { // Throw FatalConnectionException wrapping ACE so client does right thing and closes // down the connection instead of trying to read non-existent retun. 
throw new AccessDeniedException("Connection from " + this + " for service " + From 65d68d7d1ca6ce4f41f12567c3f099f06a794d98 Mon Sep 17 00:00:00 2001 From: Baiqiang Zhao Date: Fri, 5 Feb 2021 16:31:53 +0800 Subject: [PATCH 673/769] HBASE-25554 NPE when init RegionMover (#2927) Signed-off-by: Viraj Jasani --- .../main/java/org/apache/hadoop/hbase/util/RegionMover.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 210e9e17a39f..778d66da63d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -87,7 +87,6 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { public static final int DEFAULT_MOVE_RETRIES_MAX = 5; public static final int DEFAULT_MOVE_WAIT_MAX = 60; public static final int DEFAULT_SERVERSTART_WAIT_MAX = 180; - private final RackManager rackManager; private static final Logger LOG = LoggerFactory.getLogger(RegionMover.class); @@ -103,6 +102,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable { private int port; private Connection conn; private Admin admin; + private RackManager rackManager; private RegionMover(RegionMoverBuilder builder) throws IOException { this.hostname = builder.hostname; @@ -125,7 +125,6 @@ private RegionMover(RegionMoverBuilder builder) throws IOException { } private RegionMover() { - rackManager = new RackManager(conf); } @Override From f868fea987ace1f2078a178662bfcbf95984f506 Mon Sep 17 00:00:00 2001 From: XinSun Date: Sun, 7 Feb 2021 17:13:47 +0800 Subject: [PATCH 674/769] HBASE-25553 It is better for ReplicationTracker.getListOfRegionServers to return ServerName instead of String (#2928) Signed-off-by: Wellington Chevreuil Signed-off-by: Viraj Jasani --- .../hbase/replication/ReplicationTracker.java | 7 ++++--- .../replication/ReplicationTrackerZKImpl.java | 16 ++++++++++------ .../regionserver/DumpReplicationQueues.java | 4 ++-- .../regionserver/ReplicationSourceManager.java | 3 +-- .../TestReplicationTrackerZKImpl.java | 18 +++++++++--------- 5 files changed, 26 insertions(+), 22 deletions(-) diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java index 93a32630d559..a33e23dc96b8 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTracker.java @@ -20,6 +20,7 @@ import java.util.List; +import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; /** @@ -37,13 +38,13 @@ public interface ReplicationTracker { * Register a replication listener to receive replication events. * @param listener the listener to register */ - public void registerListener(ReplicationListener listener); + void registerListener(ReplicationListener listener); - public void removeListener(ReplicationListener listener); + void removeListener(ReplicationListener listener); /** * Returns a list of other live region servers in the cluster. * @return List of region servers. 
*/ - public List getListOfRegionServers(); + List getListOfRegionServers(); } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java index 54c9c2cdc0af..6fc3c452723d 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java @@ -20,7 +20,10 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.stream.Collectors; + import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -49,7 +52,7 @@ public class ReplicationTrackerZKImpl implements ReplicationTracker { // listeners to be notified private final List listeners = new CopyOnWriteArrayList<>(); // List of all the other region servers in this cluster - private final ArrayList otherRegionServers = new ArrayList<>(); + private final List otherRegionServers = new ArrayList<>(); public ReplicationTrackerZKImpl(ZKWatcher zookeeper, Abortable abortable, Stoppable stopper) { this.zookeeper = zookeeper; @@ -74,10 +77,10 @@ public void removeListener(ReplicationListener listener) { * Return a snapshot of the current region servers. */ @Override - public List getListOfRegionServers() { + public List getListOfRegionServers() { refreshOtherRegionServersList(false); - List list = null; + List list = null; synchronized (otherRegionServers) { list = new ArrayList<>(otherRegionServers); } @@ -162,7 +165,7 @@ private String getZNodeName(String fullPath) { * if it was empty), false if the data was missing in ZK */ private boolean refreshOtherRegionServersList(boolean watch) { - List newRsList = getRegisteredRegionServers(watch); + List newRsList = getRegisteredRegionServers(watch); if (newRsList == null) { return false; } else { @@ -178,7 +181,7 @@ private boolean refreshOtherRegionServersList(boolean watch) { * Get a list of all the other region servers in this cluster and set a watch * @return a list of server nanes */ - private List getRegisteredRegionServers(boolean watch) { + private List getRegisteredRegionServers(boolean watch) { List result = null; try { if (watch) { @@ -190,6 +193,7 @@ private List getRegisteredRegionServers(boolean watch) { } catch (KeeperException e) { this.abortable.abort("Get list of registered region servers", e); } - return result; + return result == null ? 
null : + result.stream().map(ServerName::parseServerName).collect(Collectors.toList()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index cc0d9bbaa2e7..92c57a89d6be 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -308,7 +308,7 @@ public String dumpQueues(ZKWatcher zkw, Set peerIds, queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); replicationTracker = ReplicationFactory.getReplicationTracker(zkw, new WarnOnlyAbortable(), new WarnOnlyStoppable()); - Set liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers()); + Set liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers()); // Loops each peer on each RS and dumps the queues List regionservers = queueStorage.getListOfReplicators(); @@ -317,7 +317,7 @@ public String dumpQueues(ZKWatcher zkw, Set peerIds, } for (ServerName regionserver : regionservers) { List queueIds = queueStorage.getAllQueues(regionserver); - if (!liveRegionServers.contains(regionserver.getServerName())) { + if (!liveRegionServers.contains(regionserver)) { deadRegionServers.add(regionserver.getServerName()); } for (String queueId : queueIds) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index c1166802b0ee..303a091ac98f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -283,8 +283,7 @@ private void adoptAbandonedQueues() { if (currentReplicators == null || currentReplicators.isEmpty()) { return; } - List otherRegionServers = replicationTracker.getListOfRegionServers().stream() - .map(ServerName::valueOf).collect(Collectors.toList()); + List otherRegionServers = replicationTracker.getListOfRegionServers(); LOG.info( "Current list of replicators: " + currentReplicators + " other RSs: " + otherRegionServers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java index 1500a717c401..da82e19f2ab3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java @@ -115,26 +115,26 @@ public void testGetListOfRegionServers() throws Exception { assertEquals(0, rt.getListOfRegionServers().size()); // 1 region server - ZKUtil.createWithParents(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, "hostname1.example.org:1234")); - List rss = rt.getListOfRegionServers(); + ZKUtil.createWithParents(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, + "hostname1.example.org,1234,1611218678009")); + List rss = rt.getListOfRegionServers(); assertEquals(rss.toString(), 1, rss.size()); // 2 region servers - ZKUtil.createWithParents(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, 
"hostname2.example.org:1234")); + ZKUtil.createWithParents(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, + "hostname2.example.org,1234,1611218678009")); rss = rt.getListOfRegionServers(); assertEquals(rss.toString(), 2, rss.size()); // 1 region server - ZKUtil.deleteNode(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, "hostname2.example.org:1234")); + ZKUtil.deleteNode(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, + "hostname2.example.org,1234,1611218678009")); rss = rt.getListOfRegionServers(); assertEquals(1, rss.size()); // 0 region server - ZKUtil.deleteNode(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, "hostname1.example.org:1234")); + ZKUtil.deleteNode(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().rsZNode, + "hostname1.example.org,1234,1611218678009")); rss = rt.getListOfRegionServers(); assertEquals(rss.toString(), 0, rss.size()); } From e386473dc612dcec1a12243e7cddb212e61482d5 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:06:52 +0300 Subject: [PATCH 675/769] Added AdaptiveLruBlockCache --- .../apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index a72e86b132a4..88f27f12d703 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -134,8 +134,13 @@ * can adjust to it and set the coefficient to lower value. * For example, we set the coefficient = 0.01. It means the overhead (see above) will be * multiplied by 0.01 and the result is the value of reducing percent caching blocks. For example, +<<<<<<< HEAD * if the overhead = 300% and the coefficient = 0.01, * then percent of caching blocks will * reduce by 3%. +======= + * if the overhead = 300% and the coefficient = 0.01, + * then percent of caching blocks will reduce by 3%. +>>>>>>> Added AdaptiveLruBlockCache * Similar logic when overhead has got negative value (overshooting). Maybe it is just short-term * fluctuation and we will try to stay in this mode. It helps avoid premature exit during * short-term fluctuation. Backpressure has simple logic: more overshooting - more caching blocks. 
From e527cae981fd24999d501038bc813243e59c8785 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:20:51 +0300 Subject: [PATCH 676/769] Added AdaptiveLruBlockCache + rebase --- .../hbase/io/hfile/AdaptiveLruBlockCache.java | 38 +++++++++++-------- .../io/hfile/TestAdaptiveLruBlockCache.java | 12 ++++-- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index 88f27f12d703..8318d998ce59 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -327,7 +327,7 @@ public class AdaptiveLruBlockCache implements FirstLevelBlockCache { * @param blockSize approximate size of each block, in bytes */ public AdaptiveLruBlockCache(long maxSize, long blockSize) { - this(maxSize, blockSize, true); + this(maxSize, blockSize,true); } /** @@ -349,7 +349,8 @@ public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThrea DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); } - public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { + public AdaptiveLruBlockCache(long maxSize, long blockSize, + boolean evictionThread, Configuration conf) { this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), DEFAULT_LOAD_FACTOR, @@ -470,17 +471,17 @@ public int getCacheDataBlockPercent() { } /** - * The block cached in AdaptiveLruBlockCache will always be an heap block: on the one side, the heap - * access will be more faster then off-heap, the small index block or meta block cached in - * CombinedBlockCache will benefit a lot. on other side, the AdaptiveLruBlockCache size is always - * calculated based on the total heap size, if caching an off-heap block in AdaptiveLruBlockCache, the - * heap size will be messed up. Here we will clone the block into an heap block if it's an - * off-heap block, otherwise just use the original block. The key point is maintain the refCnt of - * the block (HBASE-22127):
    + * The block cached in AdaptiveLruBlockCache will always be an heap block: on the one side, + * the heap access will be more faster then off-heap, the small index block or meta block + * cached in CombinedBlockCache will benefit a lot. on other side, the AdaptiveLruBlockCache size + * is always * calculated based on the total heap size, if caching an off-heap block in + * AdaptiveLruBlockCache, the heap size will be messed up. Here we will clone the block into an + * heap block if it's an off-heap block, otherwise just use the original block. The key point is + * maintain the refCnt of the block (HBASE-22127):
* 1. if cache the cloned heap block, its refCnt is a totally new one, it's easy to handle;
    * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's - * reservoir, if both RPC and AdaptiveLruBlockCache release the block, then it can be garbage collected by - * JVM, so need a retain here. + * reservoir, if both RPC and AdaptiveLruBlockCache release the block, then it can be garbage + * collected by JVM, so need a retain here. * @param buf the original block * @return an block with an heap memory backend. */ @@ -491,7 +492,8 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { return HFileBlock.deepCloneOnHeap(blk); } } - // The block will be referenced by this AdaptiveLruBlockCache, so should increase its refCnt here. + // The block will be referenced by this AdaptiveLruBlockCache, + // so should increase its refCnt here. return buf.retain(); } @@ -537,7 +539,8 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) } LruCachedBlock cb = map.get(cacheKey); - if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, buf)) { + if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, + buf)) { return; } long currentSize = size.get(); @@ -806,9 +809,12 @@ long evict() { } // Instantiate priority buckets - BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); - BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); - BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); + BlockBucket bucketSingle + = new BlockBucket("single", bytesToFree, blockSize, singleSize()); + BlockBucket bucketMulti + = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); + BlockBucket bucketMemory + = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); // Scan entire map putting into appropriate buckets for (LruCachedBlock cachedBlock : map.values()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java index fa2f9afed5c2..fb14a050dfbd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java @@ -984,7 +984,8 @@ static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exc HFileContext meta = new HFileContextBuilder().build(); BlockCacheKey key = new BlockCacheKey("key1", 0); HFileBlock blk = new HFileBlock(BlockType.DATA, size, size, -1, - ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, 52, -1, meta, + ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, + 52, -1, meta, HEAP); AtomicBoolean err1 = new AtomicBoolean(false); Thread t1 = new Thread(() -> { @@ -1037,7 +1038,8 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); AdaptiveLruBlockCache cache = - new AdaptiveLruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + new AdaptiveLruBlockCache(maxSize, blockSize, false, ( + int) Math.ceil(1.2 * maxSize / blockSize), AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min 0.99f, // acceptable @@ -1056,10 +1058,12 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E long maxSize = 100000000; int 
numBlocks = 100000; final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); - assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + assertTrue("calculateBlockSize appears broken.", + blockSize * numBlocks <= maxSize); final AdaptiveLruBlockCache cache = - new AdaptiveLruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + new AdaptiveLruBlockCache(maxSize, blockSize, true, + (int) Math.ceil(1.2 * maxSize / blockSize), AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min 0.99f, // acceptable From f4a1b3da341bb1596c302c63cbd8d6046737cbf0 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:23:26 +0300 Subject: [PATCH 677/769] Added AdaptiveLruBlockCache + rebase --- .../hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java index fb14a050dfbd..ab2e566de9f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java @@ -71,7 +71,8 @@ public void testCacheEvictionThreadSafe() throws Exception { int numBlocks = 9; int testRuns = 10; final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); - assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + assertTrue("calculateBlockSize appears broken.", + blockSize * numBlocks <= maxSize); final Configuration conf = HBaseConfiguration.create(); final AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize); From dca4a51d528f0fbd228363c225b6b6891666aac8 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 25 Nov 2020 14:10:12 -0800 Subject: [PATCH 678/769] HBASE-24640 Purge use of VisibleForTesting (#2695) Signed-off-by: Reid Chan Signed-off-by: Nick Dimiduk --- .../java/org/apache/hadoop/hbase/master/SplitWALManager.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index 6db094c4e6df..e72b607a7f8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WALSplitUtil; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; From 5ef67e6bf2716b30a5e96131538ad370a12e7f9e Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:28:15 +0300 Subject: [PATCH 679/769] Added AdaptiveLruBlockCache + rebase --- .../apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index 8318d998ce59..dab90f96af19 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -48,7 +48,8 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * This realisation improve performance of classical LRU cache up to 3 times via reduce GC job. + * This realisation improve performance of classical LRU + * cache up to 3 times via reduce GC job. *
    * The classical block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a From 2492ca20e16a019c68f2ac5c9413fdd6938d8585 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:28:54 +0300 Subject: [PATCH 680/769] Added AdaptiveLruBlockCache + rebase --- .../hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index dab90f96af19..43034b6e891f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -51,10 +51,10 @@ * This realisation improve performance of classical LRU * cache up to 3 times via reduce GC job. *
    - * The classical block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an - * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a - * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} - * operations. + * The classical block cache implementation that is memory-aware using {@link HeapSize}, + * memory-bound using an LRU eviction algorithm, and concurrent: backed by + * a {@link ConcurrentHashMap} and with a non-blocking eviction thread giving + * constant-time {@link #cacheBlock} and {@link #getBlock} operations. *
    * Contains three levels of block priority to allow for scan-resistance and in-memory families * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An From 6bf2b0a81da0e0b388691d1f399e1e5e267b11d2 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 9 Mar 2020 23:21:49 +0300 Subject: [PATCH 681/769] Update LruBlockCache.java --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 27 ++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index bc0df4306b62..b80c3ab4ae52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -145,6 +145,9 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; + private static final String LRU_CACHE_DATA_BLOCK_PERCENT = "hbase.lru.cache.data.block.percent"; + private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 100; + /** * Defined the cache map as {@link ConcurrentHashMap} here, because in * {@link LruBlockCache#getBlock}, we need to guarantee the atomicity of map#computeIfPresent @@ -225,6 +228,9 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; + /** Percent of cached Data blocks */ + private final int cacheDataBlockPercent; + /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). @@ -252,7 +258,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { DEFAULT_MEMORY_FACTOR, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, - DEFAULT_MAX_BLOCK_SIZE); + DEFAULT_MAX_BLOCK_SIZE, + DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { @@ -268,7 +275,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), - conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -294,7 +302,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize) { + boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { @@ -330,6 +338,11 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, } else { this.evictionThread = null; } + + // check the bounds + cacheDataBlockPercent = cacheDataBlockPercent > 100 ? 
100 : cacheDataBlockPercent; + this.cacheDataBlockPercent = cacheDataBlockPercent < 0 ? 0 : cacheDataBlockPercent; + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, @@ -392,6 +405,14 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + // Don't cache this DATA block if we have limit on BlockCache, + // good for performance (HBASE-23887) + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; + } + } + if (buf.heapSize() > maxBlockSize) { // If there are a lot of blocks that are too // big this can make the logs way too noisy. From 444f37d667d0709291384216195745cbc25e2410 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 9 Mar 2020 23:22:39 +0300 Subject: [PATCH 682/769] Update TestLruBlockCache.java --- .../hbase/io/hfile/TestLruBlockCache.java | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index af70f3db7cc4..96ba3a9e3592 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -342,7 +342,8 @@ public void testCacheEvictionThreePriorities() throws Exception { 0.34f, // memory 1.2f, // limit false, - 16 * 1024 * 1024); + 16 * 1024 * 1024, + 100); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -464,7 +465,8 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 0.5f, // memory 1.2f, // limit true, - 16 * 1024 * 1024); + 16 * 1024 * 1024, + 100); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -571,7 +573,8 @@ public void testScanResistance() throws Exception { 0.34f, // memory 1.2f, // limit false, - 16 * 1024 * 1024); + 16 * 1024 * 1024, + 100); CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -635,7 +638,8 @@ public void testMaxBlockSize() throws Exception { 0.34f, // memory 1.2f, // limit false, - 1024); + 1024, + 100); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -675,7 +679,8 @@ public void testResizeBlockCache() throws Exception { 0.34f, // memory 1.2f, // limit false, - 16 * 1024 * 1024); + 16 * 1024 * 1024, + 100); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -837,7 +842,8 @@ public void testCacheBlockNextBlockMetadataMissing() { 0.34f, // memory 1.2f, // limit false, - 1024); + 1024, + 100); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -1026,8 +1032,8 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.33f, // multi 0.34f, // memory 1.2f, // limit - false, 1024); + false, 1024, + 
100); testMultiThreadGetAndEvictBlockInternal(cache); } } - From 58d51a9a06d79727dcfe9fde1c97144944151fbb Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 07:36:45 +0300 Subject: [PATCH 683/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index b80c3ab4ae52..0f593442a774 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -228,7 +228,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached Data blocks */ + /** Percent of cached data blocks */ private final int cacheDataBlockPercent; /** From 543524d7dbbd7ad4e5d62c3b80f543fabcb7d6a5 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 10:42:33 +0300 Subject: [PATCH 684/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 0f593442a774..b80c3ab4ae52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -228,7 +228,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached data blocks */ + /** Percent of cached Data blocks */ private final int cacheDataBlockPercent; /** From 4f54ed12d455b1da27ae5231704bafb8ee948a24 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 13:21:11 +0300 Subject: [PATCH 685/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index b80c3ab4ae52..0f593442a774 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -228,7 +228,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached Data blocks */ + /** Percent of cached data blocks */ private final int cacheDataBlockPercent; /** From ae02ebd67a264f231f70aa67397f6d02370d14a5 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 14:55:09 +0300 Subject: [PATCH 686/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 
0f593442a774..b80c3ab4ae52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -228,7 +228,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached data blocks */ + /** Percent of cached Data blocks */ private final int cacheDataBlockPercent; /** From e34919f8dc2c35476641224a38e7c6cf8760338c Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 10 Mar 2020 17:37:49 +0300 Subject: [PATCH 687/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index b80c3ab4ae52..0f593442a774 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -228,7 +228,7 @@ public class LruBlockCache implements FirstLevelBlockCache { */ private transient BlockCache victimHandler = null; - /** Percent of cached Data blocks */ + /** Percent of cached data blocks */ private final int cacheDataBlockPercent; /** From a1e086353baa7da605228afb249c9c0af8dd5435 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 3 May 2020 15:06:06 +0300 Subject: [PATCH 688/769] Update LruBlockCache.java Added parameters that help control the eviction process: hbase.lru.cache.heavy.eviction.count.limit - sets how many consecutive eviction runs must be heavy before we stop caching new data blocks; hbase.lru.cache.heavy.eviction.bytes.size.limit - sets how many bytes a single run must evict for that run to count as heavy. By default, if the eviction process runs 10 times in a row (about 100 seconds) and frees more than 10 MB each time, then we start to skip 50% of data blocks.
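For reference, the two new keys (plus hbase.lru.cache.data.block.percent from the related patches) could be set programmatically as below. This is only a sketch: in a real deployment these would normally live in hbase-site.xml, and the values shown are simply the defaults this commit message describes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HeavyEvictionConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Number of consecutive heavy eviction runs before data blocks start
    // being skipped (the default named above: 10 runs, roughly 100 seconds).
    conf.setInt("hbase.lru.cache.heavy.eviction.count.limit", 10);
    // Bytes a single eviction run must free to count as heavy (10 MB).
    conf.setInt("hbase.lru.cache.heavy.eviction.bytes.size.limit", 10 * 1024 * 1024);
    // Percent of data blocks still admitted while skipping is active (50%).
    conf.setInt("hbase.lru.cache.data.block.percent", 50);
  }
}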
--- .../hadoop/hbase/io/hfile/LruBlockCache.java | 117 +++++++++--------- 1 file changed, 61 insertions(+), 56 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 0f593442a774..37d40e40ff5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -37,52 +37,60 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; import org.apache.hbase.thirdparty.com.google.common.base.Objects; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an - * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a - * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} - * operations. - *
    + * A block cache implementation that is memory-aware using {@link HeapSize}, + * memory-bound using an LRU eviction algorithm, and concurrent: backed by a + * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving + * constant-time {@link #cacheBlock} and {@link #getBlock} operations.
    + * * Contains three levels of block priority to allow for scan-resistance and in-memory families - * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An - * in-memory column family is a column family that should be served from memory if possible): - * single-access, multiple-accesses, and in-memory priority. A block is added with an in-memory - * priority flag if {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptor#isInMemory()}, - * otherwise a block becomes a single access priority the first time it is read into this block - * cache. If a block is accessed again while in cache, it is marked as a multiple access priority - * block. This delineation of blocks is used to prevent scans from thrashing the cache adding a - * least-frequently-used element to the eviction algorithm. - *
    - * Each priority is given its own chunk of the total cache to ensure fairness during eviction. Each - * priority will retain close to its maximum size, however, if any priority is not using its entire - * chunk the others are able to grow beyond their chunk size. - *
    - * Instantiated at a minimum with the total size and average block size. All sizes are in bytes. The - * block size is not especially important as this cache is fully dynamic in its sizing of blocks. It - * is only used for pre-allocating data structures and in initial heap estimation of the map. - *
    - * The detailed constructor defines the sizes for the three priorities (they should total to the - * maximum size defined). It also sets the levels that trigger and control the eviction - * thread. - *
    - * The acceptable size is the cache size level which triggers the eviction process to - * start. It evicts enough blocks to get the size below the minimum size specified. - *
    - * Eviction happens in a separate thread and involves a single full-scan of the map. It determines - * how many bytes must be freed to reach the minimum size, and then while scanning determines the - * fewest least-recently-used blocks necessary from each of the three priorities (would be 3 times - * bytes to free). It then uses the priority chunk sizes to evict fairly according to the relative - * sizes and usage. + * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An in-memory column + * family is a column family that should be served from memory if possible): + * single-access, multiple-accesses, and in-memory priority. + * A block is added with an in-memory priority flag if + * {@link org.apache.hadoop.hbase.HColumnDescriptor#isInMemory()}, otherwise a block becomes a + * single access priority the first time it is read into this block cache. If a block is + * accessed again while in cache, it is marked as a multiple access priority block. This + * delineation of blocks is used to prevent scans from thrashing the cache adding a + * least-frequently-used element to the eviction algorithm.
    + * + * Each priority is given its own chunk of the total cache to ensure + * fairness during eviction. Each priority will retain close to its maximum + * size, however, if any priority is not using its entire chunk the others + * are able to grow beyond their chunk size.
    + * + * Instantiated at a minimum with the total size and average block size. + * All sizes are in bytes. The block size is not especially important as this + * cache is fully dynamic in its sizing of blocks. It is only used for + * pre-allocating data structures and in initial heap estimation of the map.
    + * + * The detailed constructor defines the sizes for the three priorities (they + * should total to the maximum size defined). It also sets the levels that + * trigger and control the eviction thread.
    + * + * The acceptable size is the cache size level which triggers the eviction + * process to start. It evicts enough blocks to get the size below the + * minimum size specified.
    + * + * Eviction happens in a separate thread and involves a single full-scan + * of the map. It determines how many bytes must be freed to reach the minimum + * size, and then while scanning determines the fewest least-recently-used + * blocks necessary from each of the three priorities (would be 3 times bytes + * to free). It then uses the priority chunk sizes to evict fairly according + * to the relative sizes and usage. */ @InterfaceAudience.Private public class LruBlockCache implements FirstLevelBlockCache { @@ -454,7 +462,7 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) map.put(cacheKey, cb); long val = elements.incrementAndGet(); if (buf.getBlockType().isData()) { - dataBlockElements.increment(); + dataBlockElements.increment(); } if (LOG.isTraceEnabled()) { long size = map.size(); @@ -511,7 +519,7 @@ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { heapsize *= -1; } if (bt != null && bt.isData()) { - dataBlockSize.add(heapsize); + dataBlockSize.add(heapsize); } return size.addAndGet(heapsize); } @@ -597,9 +605,8 @@ public int evictBlocksByHfileName(String hfileName) { int numEvicted = 0; for (BlockCacheKey key : map.keySet()) { if (key.getHfileName().equals(hfileName)) { - if (evictBlock(key)) { + if (evictBlock(key)) ++numEvicted; - } } } if (victimHandler != null) { @@ -656,10 +663,12 @@ private void runEviction() { } } + @VisibleForTesting boolean isEvictionInProgress() { return evictionInProgress; } + @VisibleForTesting long getOverhead() { return overhead; } @@ -670,9 +679,7 @@ long getOverhead() { void evict() { // Ensure only one eviction at a time - if (!evictionLock.tryLock()) { - return; - } + if(!evictionLock.tryLock()) return; try { evictionInProgress = true; @@ -685,9 +692,7 @@ void evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) { - return; - } + if (bytesToFree <= 0) return; // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); @@ -936,7 +941,7 @@ EvictionThread getEvictionThread() { * * Thread is triggered into action by {@link LruBlockCache#runEviction()} */ - static class EvictionThread extends Thread { + static class EvictionThread extends HasThread { private WeakReference cache; private volatile boolean go = true; @@ -962,9 +967,7 @@ public void run() { } } LruBlockCache cache = this.cache.get(); - if (cache == null) { - break; - } + if (cache == null) break; cache.evict(); } } @@ -1041,8 +1044,10 @@ public CacheStats getStats() { return this.stats; } - public final static long CACHE_FIXED_OVERHEAD = - ClassSize.estimateBase(LruBlockCache.class, false); + public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( + (4 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + + (6 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + + ClassSize.OBJECT); @Override public long heapSize() { @@ -1110,13 +1115,9 @@ public String getFilename() { @Override public int compareTo(CachedBlock other) { int diff = this.getFilename().compareTo(other.getFilename()); - if (diff != 0) { - return diff; - } + if (diff != 0) return diff; diff = Long.compare(this.getOffset(), other.getOffset()); - if (diff != 0) { - return diff; - } + if (diff != 0) return diff; if (other.getCachedTime() < 0 || this.getCachedTime() < 0) { throw new IllegalStateException(this.getCachedTime() + ", " + other.getCachedTime()); } @@ -1191,6 +1192,7 @@ public void shutdown() { } /** Clears the cache. Used in tests. 
*/ + @VisibleForTesting public void clearCache() { this.map.clear(); this.elements.set(0); @@ -1201,6 +1203,7 @@ public void clearCache() { * * @return the set of cached file names */ + @VisibleForTesting SortedSet getCachedFileNamesForTest() { SortedSet fileNames = new TreeSet<>(); for (BlockCacheKey cacheKey : map.keySet()) { @@ -1209,6 +1212,7 @@ SortedSet getCachedFileNamesForTest() { return fileNames; } + @VisibleForTesting public Map getEncodingCountsForTest() { Map counts = new EnumMap<>(DataBlockEncoding.class); for (LruCachedBlock block : map.values()) { @@ -1219,6 +1223,7 @@ public Map getEncodingCountsForTest() { return counts; } + @VisibleForTesting Map getMapForTests() { return map; } From 7092e88117962de3dca5a522fcb39fde930ffdc7 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 3 May 2020 15:10:24 +0300 Subject: [PATCH 689/769] Update TestLruBlockCache.java Fixed tests --- .../hbase/io/hfile/TestLruBlockCache.java | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 96ba3a9e3592..8d24f0205a0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -343,7 +343,9 @@ public void testCacheEvictionThreePriorities() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -466,7 +468,9 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 1.2f, // limit true, 16 * 1024 * 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -574,7 +578,10 @@ public void testScanResistance() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); + CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -639,7 +646,9 @@ public void testMaxBlockSize() throws Exception { 1.2f, // limit false, 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -680,7 +689,9 @@ public void testResizeBlockCache() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -843,7 +854,9 @@ public void testCacheBlockNextBlockMetadataMissing() { 1.2f, // limit false, 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -1033,7 +1046,9 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.34f, // memory 1.2f, // limit false, 1024, - 100); + 100, + 10 + 10 * 1024 * 1024); testMultiThreadGetAndEvictBlockInternal(cache); } } From 17a95bf3352ac4c3bb93b2fc823e0ebe5732521c Mon Sep 17 00:00:00 2001 From: pustota2009 
<61382543+pustota2009@users.noreply.github.com> Date: Sun, 3 May 2020 16:59:14 +0300 Subject: [PATCH 690/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 37d40e40ff5b..3b5f3ac36c6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -684,7 +684,7 @@ void evict() { try { evictionInProgress = true; long currentSize = this.size.get(); - long bytesToFree = currentSize - minSize(); + bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { LOG.trace("Block cache LRU eviction started; Attempting to free " + From 506663b1e917fbde2edf17a067f3a103a4b8c15c Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 3 May 2020 18:34:34 +0300 Subject: [PATCH 691/769] Update TestLruBlockCache.java fix bug --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 8d24f0205a0a..50889a891921 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -344,7 +344,7 @@ public void testCacheEvictionThreePriorities() throws Exception { false, 16 * 1024 * 1024, 100, - 10 + 10, 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); @@ -469,7 +469,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { true, 16 * 1024 * 1024, 100, - 10 + 10, 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); @@ -579,7 +579,7 @@ public void testScanResistance() throws Exception { false, 16 * 1024 * 1024, 100, - 10 + 10, 10 * 1024 * 1024); @@ -647,7 +647,7 @@ public void testMaxBlockSize() throws Exception { false, 1024, 100, - 10 + 10, 10 * 1024 * 1024); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -690,7 +690,7 @@ public void testResizeBlockCache() throws Exception { false, 16 * 1024 * 1024, 100, - 10 + 10, 10 * 1024 * 1024); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); @@ -855,7 +855,7 @@ public void testCacheBlockNextBlockMetadataMissing() { false, 1024, 100, - 10 + 10, 10 * 1024 * 1024); BlockCacheKey key = new BlockCacheKey("key1", 0); @@ -1047,7 +1047,7 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 1.2f, // limit false, 1024, 100, - 10 + 10, 10 * 1024 * 1024); testMultiThreadGetAndEvictBlockInternal(cache); } From c9f7e81e1eaaebd3f4eb018425b063e2a207ec8f Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 13:51:53 +0300 Subject: [PATCH 692/769] Update LruBlockCache.java fixed style --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 65 +++++++++++++++---- 1 file changed, 51 insertions(+), 14 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 3b5f3ac36c6f..e273c5a6ec9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -154,7 +154,15 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; private static final String LRU_CACHE_DATA_BLOCK_PERCENT = "hbase.lru.cache.data.block.percent"; - private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 100; + private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 50; + + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT + = "hbase.lru.cache.heavy.eviction.count.limit"; + private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 10; + + private static final String LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT + = "hbase.lru.cache.heavy.eviction.bytes.size.limit"; + private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT = 10 * 1024 * 1024; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in @@ -238,7 +246,16 @@ public class LruBlockCache implements FirstLevelBlockCache { /** Percent of cached data blocks */ private final int cacheDataBlockPercent; - + + /** Counter to control of eviction process */ + private static int heavyEvictionCount; + + /** Limit of count eviction process when start to avoid to cache blocks */ + private final int heavyEvictionCountLimit; + + /** Limit of volume eviction process when start to avoid to cache blocks */ + private static int heavyEvictionBytesSizeLimit; + /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). 
@@ -267,7 +284,9 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE, - DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT); + DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { @@ -284,7 +303,9 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), - conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT)); + conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -310,7 +331,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent) { + boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent, + int heavyEvictionCountLimit, int heavyEvictionBytesSizeLimit) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { @@ -350,6 +372,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, // check the bounds cacheDataBlockPercent = cacheDataBlockPercent > 100 ? 100 : cacheDataBlockPercent; this.cacheDataBlockPercent = cacheDataBlockPercent < 0 ? 0 : cacheDataBlockPercent; + this.heavyEvictionCountLimit = heavyEvictionCountLimit; + this.heavyEvictionBytesSizeLimit = heavyEvictionBytesSizeLimit; // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. @@ -413,11 +437,15 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { - if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { - // Don't cache this DATA block if we have limit on BlockCache, - // good for performance (HBASE-23887) - if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { - return; + + // Don't cache this DATA block when too many blocks evicted + // and if we have limit on percent of blocks to cache + // good for performance (HBASE-23887) + if (heavyEvictionCount > heavyEvictionCountLimit) { + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; + } } } @@ -676,10 +704,11 @@ long getOverhead() { /** * Eviction method. 
*/ - void evict() { + long evict() { // Ensure only one eviction at a time - if(!evictionLock.tryLock()) return; + if(!evictionLock.tryLock()) return 0; + long bytesToFree = 0L; try { evictionInProgress = true; @@ -692,7 +721,7 @@ void evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) return; + if (bytesToFree <= 0) return 0; // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); @@ -791,6 +820,7 @@ void evict() { stats.evict(); evictionInProgress = false; evictionLock.unlock(); + return bytesToFree; } } @@ -957,6 +987,8 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { enteringRun = true; + long bytesFreed; + heavyEvictionCount = 0; while (this.go) { synchronized (this) { try { @@ -968,7 +1000,12 @@ public void run() { } LruBlockCache cache = this.cache.get(); if (cache == null) break; - cache.evict(); + bytesFreed = cache.evict(); + // Control of heavy cleaning BlockCache + if (bytesFreed > 0 && bytesFreed > heavyEvictionBytesSizeLimit) + heavyEvictionCount++; + else + heavyEvictionCount = 0; } } From 947da81d285b8df7152d6a15bc3dd4f11952b7c7 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 14:17:11 +0300 Subject: [PATCH 693/769] Update LruBlockCache.java fixed bug --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index e273c5a6ec9a..d8d428605102 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -156,7 +156,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_CACHE_DATA_BLOCK_PERCENT = "hbase.lru.cache.data.block.percent"; private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 50; - private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = "hbase.lru.cache.heavy.eviction.count.limit"; private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 10; @@ -246,16 +246,16 @@ public class LruBlockCache implements FirstLevelBlockCache { /** Percent of cached data blocks */ private final int cacheDataBlockPercent; - + /** Counter to control of eviction process */ private static int heavyEvictionCount; /** Limit of count eviction process when start to avoid to cache blocks */ private final int heavyEvictionCountLimit; - + /** Limit of volume eviction process when start to avoid to cache blocks */ private static int heavyEvictionBytesSizeLimit; - + /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). 
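One detail of the cacheBlock() change above is easy to miss: since HFile block offsets are, for practical purposes, uniformly distributed modulo 100, the check cacheKey.getOffset() % 100 >= cacheDataBlockPercent skips roughly (100 - cacheDataBlockPercent) percent of data blocks, and each block always gets the same verdict. A hedged stand-alone restatement (hypothetical helper, not code from the patch):

  // Deterministic sampling by block offset, mirroring the cacheBlock() filter.
  // Assumes offsets fall roughly uniformly into the 100 buckets of offset % 100.
  static boolean admitDataBlock(long offset, int cacheDataBlockPercent) {
    // e.g. percent = 50: offsets ending in 00-49 are cached, 50-99 are skipped
    return offset % 100 < cacheDataBlockPercent;
  }

Using the offset rather than a random draw is deliberate: the same block is consistently cached or consistently skipped, so the cached subset stays stable across repeated scans instead of churning.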
@@ -304,8 +304,10 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -437,8 +439,8 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { - - // Don't cache this DATA block when too many blocks evicted + + // Don't cache this DATA block when too many blocks evict // and if we have limit on percent of blocks to cache // good for performance (HBASE-23887) if (heavyEvictionCount > heavyEvictionCountLimit) { @@ -707,7 +709,7 @@ long getOverhead() { long evict() { // Ensure only one eviction at a time - if(!evictionLock.tryLock()) return 0; + if (!evictionLock.tryLock()) {return 0}; long bytesToFree = 0L; try { @@ -721,7 +723,7 @@ long evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) return 0; + if (bytesToFree <= 0) {return 0}; // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); @@ -1002,10 +1004,12 @@ public void run() { if (cache == null) break; bytesFreed = cache.evict(); // Control of heavy cleaning BlockCache - if (bytesFreed > 0 && bytesFreed > heavyEvictionBytesSizeLimit) + if (bytesFreed > 0 && bytesFreed > heavyEvictionBytesSizeLimit) { heavyEvictionCount++; - else + } + else { heavyEvictionCount = 0; + } } } From 38b6cef2e4be81089fa1965ceffe8203187a662f Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 15:24:37 +0300 Subject: [PATCH 694/769] Update LruBlockCache.java fixed style --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index d8d428605102..29c456806bd3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -709,7 +709,7 @@ long getOverhead() { long evict() { // Ensure only one eviction at a time - if (!evictionLock.tryLock()) {return 0}; + if (!evictionLock.tryLock()) {return 0;} long bytesToFree = 0L; try { @@ -723,7 +723,7 @@ long evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) {return 0}; + if (bytesToFree <= 0) {return 0;} // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); From 2dba3fe54925759a3d779f873780881773d37a71 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 19:04:53 +0300 Subject: [PATCH 695/769] Update TestLruBlockCache.java added unit test --- 
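For orientation before the test diff: this is roughly how the three knobs the test exercises would be supplied through the Configuration-based constructor shown earlier. The property names are the ones defined in the patch; the values and surrounding variables are purely illustrative:

  // Illustrative wiring only (assumes org.apache.hadoop.conf.Configuration and
  // org.apache.hadoop.hbase.HBaseConfiguration are imported); values are made up.
  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.lru.cache.data.block.percent", 50);          // cache ~50% of data blocks
  conf.setInt("hbase.lru.cache.heavy.eviction.count.limit", 10);  // consecutive heavy passes
  conf.setInt("hbase.lru.cache.heavy.eviction.bytes.size.limit", 10 * 1024 * 1024);
  long maxSize = 1024L * 1024 * 1024;  // 1 GB cache, for the example
  long blockSize = 64 * 1024;          // 64 KB expected average block size
  LruBlockCache cache = new LruBlockCache(maxSize, blockSize, true, conf);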
.../hbase/io/hfile/TestLruBlockCache.java | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 50889a891921..35ca376194a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1051,4 +1051,51 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 10 * 1024 * 1024); testMultiThreadGetAndEvictBlockInternal(cache); } + + public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exception { + long maxSize = 100000; + int numBlocks = 100; + final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); + assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + + final LruBlockCache cache = + new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + maxSize, + percentOfCachedBlocks, + 0, + 1); + + EvictionThread evictionThread = cache.getEvictionThread(); + assertTrue(evictionThread != null); + while (!evictionThread.isEnteringRun()) { + Thread.sleep(1); + } + + final String hfileName = "hfile"; + for (int blockIndex = 0; blockIndex <= numBlocks * 5; ++blockIndex) { + CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); + cache.cacheBlock(block.cacheKey, block, false); + Thread.sleep(1); + } + + // Check if all offset of cached blocks less + // It means some of blocka were not put into BlockCache + for (BlockCacheKey key : cache.getMapForTests().keySet()) + Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); + + } + + @Test + public void testSkipCacheDataBlocks() throws Exception { + for (int percentOfCachedBlocks = 25; percentOfCachedBlocks <= 100; percentOfCachedBlocks+=25) { + testSkipCacheDataBlocksInteral(percentOfCachedBlocks); + } + } } From a993189fc0256597c16797f36ce50c81394b90d2 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 19:06:18 +0300 Subject: [PATCH 696/769] Update TestLruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 35ca376194a7..a93e3e32c9b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1089,7 +1089,6 @@ public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exc // It means some of blocka were not put into BlockCache for (BlockCacheKey key : cache.getMapForTests().keySet()) Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); - } @Test From c315fc74132ce504505077f5f14ffd5cab02f816 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 20:11:36 +0300 Subject: [PATCH 697/769] Update TestLruBlockCache.java fix codestyle --- 
.../org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index a93e3e32c9b1..c1285bc35813 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1087,8 +1087,9 @@ public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exc // Check if all offset of cached blocks less // It means some of blocka were not put into BlockCache - for (BlockCacheKey key : cache.getMapForTests().keySet()) + for (BlockCacheKey key : cache.getMapForTests().keySet()) { Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); + } } @Test From 7c1f2ac90e96bbadd13386221c64b4e0f360d80f Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 20:12:22 +0300 Subject: [PATCH 698/769] Update LruBlockCache.java fix codestyle --- .../apache/hadoop/hbase/io/hfile/LruBlockCache.java | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 29c456806bd3..1745ddb601ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -304,9 +304,9 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, + conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); } @@ -709,7 +709,10 @@ long getOverhead() { long evict() { // Ensure only one eviction at a time - if (!evictionLock.tryLock()) {return 0;} + if (!evictionLock.tryLock()) { + return 0; + } + long bytesToFree = 0L; try { @@ -723,7 +726,9 @@ long evict() { StringUtils.byteDesc(currentSize)); } - if (bytesToFree <= 0) {return 0;} + if (bytesToFree <= 0) { + return 0; + } // Instantiate priority buckets BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); From a259febfb57882b31df8ab4eb5119e3f421820db Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 4 May 2020 21:28:05 +0300 Subject: [PATCH 699/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 1745ddb601ea..1bda3314bda8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -251,7 +251,7 @@ public 
class LruBlockCache implements FirstLevelBlockCache { private static int heavyEvictionCount; /** Limit of count eviction process when start to avoid to cache blocks */ - private final int heavyEvictionCountLimit; + private static int heavyEvictionCountLimit; /** Limit of volume eviction process when start to avoid to cache blocks */ private static int heavyEvictionBytesSizeLimit; From e57d3de44a8c6c1dbd7e3dc1ed96ddb87d0498a9 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 5 May 2020 09:05:43 +0300 Subject: [PATCH 700/769] Update LruBlockCache.java refactoring --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 1bda3314bda8..4d92fe57bec0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -248,13 +248,13 @@ public class LruBlockCache implements FirstLevelBlockCache { private final int cacheDataBlockPercent; /** Counter to control of eviction process */ - private static int heavyEvictionCount; + private volatile int heavyEvictionCount; /** Limit of count eviction process when start to avoid to cache blocks */ - private static int heavyEvictionCountLimit; + private final int heavyEvictionCountLimit; /** Limit of volume eviction process when start to avoid to cache blocks */ - private static int heavyEvictionBytesSizeLimit; + private final int heavyEvictionBytesSizeLimit; /** * Default constructor. Specify maximum size and expected average block @@ -376,6 +376,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, this.cacheDataBlockPercent = cacheDataBlockPercent < 0 ? 0 : cacheDataBlockPercent; this.heavyEvictionCountLimit = heavyEvictionCountLimit; this.heavyEvictionBytesSizeLimit = heavyEvictionBytesSizeLimit; + this.heavyEvictionCount = 0; // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. @@ -441,8 +442,8 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { // Don't cache this DATA block when too many blocks evict - // and if we have limit on percent of blocks to cache - // good for performance (HBASE-23887) + // and if we have limit on percent of blocks to cache. + // It is good for performance (HBASE-23887) if (heavyEvictionCount > heavyEvictionCountLimit) { if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { @@ -995,7 +996,6 @@ public EvictionThread(LruBlockCache cache) { public void run() { enteringRun = true; long bytesFreed; - heavyEvictionCount = 0; while (this.go) { synchronized (this) { try { @@ -1008,12 +1008,14 @@ public void run() { LruBlockCache cache = this.cache.get(); if (cache == null) break; bytesFreed = cache.evict(); - // Control of heavy cleaning BlockCache - if (bytesFreed > 0 && bytesFreed > heavyEvictionBytesSizeLimit) { - heavyEvictionCount++; + // If heavy cleaning BlockCache control. + // It helps avoid put too many blocks into BlockCache + // when evict() works very active. 
+ if (bytesFreed > 0 && bytesFreed > cache.heavyEvictionBytesSizeLimit) { + cache.heavyEvictionCount++; } else { - heavyEvictionCount = 0; + cache.heavyEvictionCount = 0; } } } @@ -1092,8 +1094,8 @@ public CacheStats getStats() { public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( (4 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + - (6 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) - + ClassSize.OBJECT); + (6 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + + (4 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); @Override public long heapSize() { From f87253defb2fdaf67a4baacc61e7ef28784a4345 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 5 May 2020 09:06:47 +0300 Subject: [PATCH 701/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 4d92fe57bec0..a8a7c9ea4be5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -373,7 +373,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, // check the bounds cacheDataBlockPercent = cacheDataBlockPercent > 100 ? 100 : cacheDataBlockPercent; - this.cacheDataBlockPercent = cacheDataBlockPercent < 0 ? 0 : cacheDataBlockPercent; + this.cacheDataBlockPercent = cacheDataBlockPercent < 1 ? 1 : cacheDataBlockPercent; this.heavyEvictionCountLimit = heavyEvictionCountLimit; this.heavyEvictionBytesSizeLimit = heavyEvictionBytesSizeLimit; this.heavyEvictionCount = 0; From 77196199b2a8af918a80f743f0872897d0d241f4 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 5 May 2020 09:09:12 +0300 Subject: [PATCH 702/769] Update TestLruBlockCache.java added comments --- .../org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index c1285bc35813..188b54408c5e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1085,7 +1085,7 @@ public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exc Thread.sleep(1); } - // Check if all offset of cached blocks less + // Check if all offset (last two digits) of cached blocks less than the percent. 
// It means some of blocka were not put into BlockCache for (BlockCacheKey key : cache.getMapForTests().keySet()) { Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); From ebf21dcd334c1e9aa9e633d0526d4b6e05aa7589 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 5 May 2020 11:09:51 +0300 Subject: [PATCH 703/769] Update TestLruBlockCache.java adjust tests to default params --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 188b54408c5e..f983d7c1f88e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -343,7 +343,7 @@ public void testCacheEvictionThreePriorities() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -468,7 +468,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 1.2f, // limit true, 16 * 1024 * 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -578,7 +578,7 @@ public void testScanResistance() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -646,7 +646,7 @@ public void testMaxBlockSize() throws Exception { 1.2f, // limit false, 1024, - 100, + 50, 10, 10 * 1024 * 1024); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); @@ -689,7 +689,7 @@ public void testResizeBlockCache() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -854,7 +854,7 @@ public void testCacheBlockNextBlockMetadataMissing() { 1.2f, // limit false, 1024, - 100, + 50, 10, 10 * 1024 * 1024); @@ -1046,7 +1046,7 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.34f, // memory 1.2f, // limit false, 1024, - 100, + 50, 10, 10 * 1024 * 1024); testMultiThreadGetAndEvictBlockInternal(cache); From e6237df7bcaa8cfa2d01fa510830fba4df24b8cf Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 11:56:29 +0300 Subject: [PATCH 704/769] Update LruBlockCache.java Implemented new auto-scale eviction rate logic --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 106 ++++++++++++------ 1 file changed, 73 insertions(+), 33 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index a8a7c9ea4be5..7eb40a83decb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -153,16 +153,17 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; - private static final String LRU_CACHE_DATA_BLOCK_PERCENT = "hbase.lru.cache.data.block.percent"; - private static final int DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT = 50; - private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = "hbase.lru.cache.heavy.eviction.count.limit"; private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 10; - private static final String 
LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT - = "hbase.lru.cache.heavy.eviction.bytes.size.limit"; - private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT = 10 * 1024 * 1024; + private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT + = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; + + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT + = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + private static final double DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in @@ -245,16 +246,16 @@ public class LruBlockCache implements FirstLevelBlockCache { private transient BlockCache victimHandler = null; /** Percent of cached data blocks */ - private final int cacheDataBlockPercent; - - /** Counter to control of eviction process */ - private volatile int heavyEvictionCount; + private volatile int cacheDataBlockPercent; /** Limit of count eviction process when start to avoid to cache blocks */ private final int heavyEvictionCountLimit; /** Limit of volume eviction process when start to avoid to cache blocks */ - private final int heavyEvictionBytesSizeLimit; + private final int heavyEvictionMbSizeLimit; + + /** Adjust auto-scaling via overhead of eviction rate */ + private final double heavyEvictionOverheadCoefficient; /** * Default constructor. Specify maximum size and expected average block @@ -284,9 +285,9 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE, - DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT); + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { @@ -303,11 +304,12 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), - conf.getInt(LRU_CACHE_DATA_BLOCK_PERCENT, DEFAULT_LRU_CACHE_DATA_BLOCK_PERCENT), conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_BYTES_SIZE_LIMIT)); + conf.getInt(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), + conf.getDouble(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -334,7 +336,8 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent, - int heavyEvictionCountLimit, int heavyEvictionBytesSizeLimit) { + int heavyEvictionCountLimit, int heavyEvictionMbSizeLimit, + double heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { @@ -372,11 +375,14 @@ 
public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, } // check the bounds - cacheDataBlockPercent = cacheDataBlockPercent > 100 ? 100 : cacheDataBlockPercent; - this.cacheDataBlockPercent = cacheDataBlockPercent < 1 ? 1 : cacheDataBlockPercent; - this.heavyEvictionCountLimit = heavyEvictionCountLimit; - this.heavyEvictionBytesSizeLimit = heavyEvictionBytesSizeLimit; - this.heavyEvictionCount = 0; + this.heavyEvictionCountLimit = heavyEvictionCountLimit < 0 ? 0 : heavyEvictionCountLimit; + this.heavyEvictionMbSizeLimit = heavyEvictionMbSizeLimit < 1 ? 1 : heavyEvictionMbSizeLimit; + this.cacheDataBlockPercent = 100; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 1 + ? 1 : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001 + ? 0.001 : heavyEvictionOverheadCoefficient; + this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. @@ -399,6 +405,11 @@ public void setMaxSize(long maxSize) { runEviction(); } } + + @VisibleForTesting + public int getCacheDataBlockPercent() { + return cacheDataBlockPercent; + } /** * The block cached in LRUBlockCache will always be an heap block: on the one side, the heap @@ -444,11 +455,9 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) // Don't cache this DATA block when too many blocks evict // and if we have limit on percent of blocks to cache. // It is good for performance (HBASE-23887) - if (heavyEvictionCount > heavyEvictionCountLimit) { - if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { - if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { - return; - } + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; } } @@ -994,8 +1003,11 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { - enteringRun = true; long bytesFreed; + long mbFreedSum = 0; + int heavyEvictionCount = 0; + int freedDataOverheadPercent = 0; + long startTime = System.currentTimeMillis(); while (this.go) { synchronized (this) { try { @@ -1008,14 +1020,42 @@ public void run() { LruBlockCache cache = this.cache.get(); if (cache == null) break; bytesFreed = cache.evict(); + long stopTime = System.currentTimeMillis(); // If heavy cleaning BlockCache control. // It helps avoid put too many blocks into BlockCache // when evict() works very active. - if (bytesFreed > 0 && bytesFreed > cache.heavyEvictionBytesSizeLimit) { - cache.heavyEvictionCount++; - } - else { - cache.heavyEvictionCount = 0; + if (stopTime - startTime <= 1000 * 10 - 1) { + mbFreedSum += bytesFreed/1024/1024; + } else { + freedDataOverheadPercent = (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; + if (mbFreedSum > cache.heavyEvictionMbSizeLimit) { + heavyEvictionCount++; + if (heavyEvictionCount > cache.heavyEvictionCountLimit) { + int ch = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + ch = ch > 15 ? 15 : ch; + ch = ch < 0 ? 0 : ch; + cache.cacheDataBlockPercent -= ch; + cache.cacheDataBlockPercent = cache.cacheDataBlockPercent < 1 ? 
1 : cache.cacheDataBlockPercent; + } + } else { + if (mbFreedSum >= cache.heavyEvictionMbSizeLimit * 0.1) { + // It help avoid exit during short-term fluctuation + int ch = (int) (-freedDataOverheadPercent * 0.1 + 1); + cache.cacheDataBlockPercent += ch; + cache.cacheDataBlockPercent = cache.cacheDataBlockPercent > 100 ? 100 : cache.cacheDataBlockPercent; + } else { + heavyEvictionCount = 0; + cache.cacheDataBlockPercent = 100; + } + } + LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + + "heavy eviction counter: {}, " + + "current caching DataBlock (%): {}", + mbFreedSum, freedDataOverheadPercent, + heavyEvictionCount, cache.cacheDataBlockPercent); + + mbFreedSum = 0; + startTime = stopTime; } } } From a9d3c2357b64b15b7bde01897859284b69edd443 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 11:56:37 +0300 Subject: [PATCH 705/769] Update TestLruBlockCache.java new test --- .../hbase/io/hfile/TestLruBlockCache.java | 93 +++++++++++-------- 1 file changed, 53 insertions(+), 40 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index f983d7c1f88e..9523fe5622c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -343,9 +343,9 @@ public void testCacheEvictionThreePriorities() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -468,9 +468,9 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 1.2f, // limit true, 16 * 1024 * 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -578,10 +578,9 @@ public void testScanResistance() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 50, 10, - 10 * 1024 * 1024); - + 500, + 0.01); CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -646,9 +645,10 @@ public void testMaxBlockSize() throws Exception { 1.2f, // limit false, 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); + CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -689,9 +689,9 @@ public void testResizeBlockCache() throws Exception { 1.2f, // limit false, 16 * 1024 * 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -854,9 +854,9 @@ public void testCacheBlockNextBlockMetadataMissing() { 1.2f, // limit false, 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -1046,31 +1046,31 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.34f, // memory 1.2f, // limit false, 1024, - 50, 10, - 10 * 1024 * 1024); + 500, + 0.01); testMultiThreadGetAndEvictBlockInternal(cache); } - - public void testSkipCacheDataBlocksInteral(int 
percentOfCachedBlocks) throws Exception { - long maxSize = 100000; - int numBlocks = 100; + + public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { + long maxSize = 100000000; + int numBlocks = 100000; final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); final LruBlockCache cache = - new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - maxSize, - percentOfCachedBlocks, - 0, - 1); + new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + maxSize, + heavyEvictionCountLimit, + 500, + 0.01); EvictionThread evictionThread = cache.getEvictionThread(); assertTrue(evictionThread != null); @@ -1079,23 +1079,36 @@ public void testSkipCacheDataBlocksInteral(int percentOfCachedBlocks) throws Exc } final String hfileName = "hfile"; - for (int blockIndex = 0; blockIndex <= numBlocks * 5; ++blockIndex) { + for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); cache.cacheBlock(block.cacheKey, block, false); - Thread.sleep(1); } - // Check if all offset (last two digits) of cached blocks less than the percent. - // It means some of blocka were not put into BlockCache - for (BlockCacheKey key : cache.getMapForTests().keySet()) { - Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks); + evictionThread.evict(); + Thread.sleep(100); + + if (heavyEvictionCountLimit == 0) { + // Check if all offset (last two digits) of cached blocks less than the percent. 
+ // It means some of blocks haven't not put into BlockCache + assertTrue(cache.getCacheDataBlockPercent() < 90); + for (BlockCacheKey key : cache.getMapForTests().keySet()) { + assertTrue(!(key.getOffset() % 100 > 90)); + } + } else { + assertTrue(cache.getCacheDataBlockPercent() == 100); + int counter = 0; + for (BlockCacheKey key : cache.getMapForTests().keySet()) { + if (key.getOffset() % 100 > 90) + counter++; + } + assertTrue(counter > 1000); } } @Test public void testSkipCacheDataBlocks() throws Exception { - for (int percentOfCachedBlocks = 25; percentOfCachedBlocks <= 100; percentOfCachedBlocks+=25) { - testSkipCacheDataBlocksInteral(percentOfCachedBlocks); - } + testSkipCacheDataBlocksInteral(0); + testSkipCacheDataBlocksInteral(100); } + } From 33bd9e1c33e6b797ce392af987cef2e8a7617835 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 12:35:40 +0300 Subject: [PATCH 706/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 7eb40a83decb..1177bb4b8597 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -306,7 +306,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), conf.getDouble(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); From 1ac3816887b965aec9045663adabb050b32ee1ab Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 12:35:45 +0300 Subject: [PATCH 707/769] Update TestLruBlockCache.java --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 9523fe5622c5..a10799717831 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1069,7 +1069,7 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E false, maxSize, heavyEvictionCountLimit, - 500, + 200, 0.01); EvictionThread evictionThread = cache.getEvictionThread(); @@ -1082,6 +1082,8 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); cache.cacheBlock(block.cacheKey, block, false); + if (cache.getCacheDataBlockPercent() < 70) // enough for test + break; } evictionThread.evict(); @@ -1089,12 +1091,13 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E if (heavyEvictionCountLimit == 0) { // Check if all offset 
(last two digits) of cached blocks less than the percent. - // It means some of blocks haven't not put into BlockCache + // It means some of blocks haven't put into BlockCache assertTrue(cache.getCacheDataBlockPercent() < 90); for (BlockCacheKey key : cache.getMapForTests().keySet()) { assertTrue(!(key.getOffset() % 100 > 90)); } } else { + // Check that auto-scaling is not working (all blocks in BlockCache) assertTrue(cache.getCacheDataBlockPercent() == 100); int counter = 0; for (BlockCacheKey key : cache.getMapForTests().keySet()) { @@ -1103,12 +1106,16 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E } assertTrue(counter > 1000); } + evictionThread.shutdown(); } @Test public void testSkipCacheDataBlocks() throws Exception { + // Check that auto-scaling will work right after start testSkipCacheDataBlocksInteral(0); + // Check that auto-scaling will not work right after start + // (have to finished before auto-scaling) testSkipCacheDataBlocksInteral(100); } - + } From b5610d20d2cbd9815a195db73060a31969efb8de Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 13:10:28 +0300 Subject: [PATCH 708/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 1177bb4b8597..89fa367f3e65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -252,7 +252,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private final int heavyEvictionCountLimit; /** Limit of volume eviction process when start to avoid to cache blocks */ - private final int heavyEvictionMbSizeLimit; + private final long heavyEvictionMbSizeLimit; /** Adjust auto-scaling via overhead of evition rate */ private final double heavyEvictionOverheadCoefficient; @@ -336,7 +336,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent, - int heavyEvictionCountLimit, int heavyEvictionMbSizeLimit, + int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, double heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || From 982b119140222351fd06668403c8495171e10a19 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 14:09:24 +0300 Subject: [PATCH 709/769] Update LruBlockCache.java --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 89fa367f3e65..28ed8a158d13 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -335,7 +335,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int 
mapConcurrencyLevel, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, int cacheDataBlockPercent, + boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, double heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; From cf291ab918a1aa8dfb1cb2219ceee323f8cf88b2 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 15:49:19 +0300 Subject: [PATCH 710/769] Update LruBlockCache.java --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 28ed8a158d13..2dbd30e61961 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -163,7 +163,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; - private static final double DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01; + private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in @@ -255,7 +255,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private final long heavyEvictionMbSizeLimit; /** Adjust auto-scaling via overhead of eviction rate */ - private final double heavyEvictionOverheadCoefficient; + private final float heavyEvictionOverheadCoefficient; /** * Default constructor. 
Specify maximum size and expected average block @@ -308,7 +308,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Confi DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), - conf.getDouble(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, + conf.getFloat(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); } @@ -337,7 +337,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, float multiFactor, float memoryFactor, float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, - double heavyEvictionOverheadCoefficient) { + float heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; if(singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { @@ -1133,9 +1133,9 @@ public CacheStats getStats() { } public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( - (4 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + - (6 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + - (4 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); + (5 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + + (7 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + + (1 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); @Override public long heapSize() { From 40b5679c6c63c56d214d956344106d4fddd403ed Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 16:35:33 +0300 Subject: [PATCH 711/769] Update LruBlockCache.java Fixed errors --- .../apache/hadoop/hbase/io/hfile/LruBlockCache.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 2dbd30e61961..5f2492bc65c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -378,10 +378,10 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, this.heavyEvictionCountLimit = heavyEvictionCountLimit < 0 ? 0 : heavyEvictionCountLimit; this.heavyEvictionMbSizeLimit = heavyEvictionMbSizeLimit < 1 ? 1 : heavyEvictionMbSizeLimit; this.cacheDataBlockPercent = 100; - heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 1 - ? 1 : heavyEvictionOverheadCoefficient; - heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001 - ? 0.001 : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 0.1f + ? 1f : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001f + ? 0.001f : heavyEvictionOverheadCoefficient; this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; // TODO: Add means of turning this off. 
Bit obnoxious running thread just to make a log @@ -1135,7 +1135,7 @@ public CacheStats getStats() { public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( (5 * Bytes.SIZEOF_LONG) + (11 * ClassSize.REFERENCE) + (7 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN) + - (1 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); + (2 * Bytes.SIZEOF_INT) + ClassSize.OBJECT); @Override public long heapSize() { From 5a92b6387e11683c72dd32dc9a686be93c2573e7 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 18:08:17 +0300 Subject: [PATCH 712/769] Update TestLruBlockCache.java Fix --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index a10799717831..07b527f9e4bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -345,7 +345,7 @@ public void testCacheEvictionThreePriorities() throws Exception { 16 * 1024 * 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -470,7 +470,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { 16 * 1024 * 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -580,7 +580,7 @@ public void testScanResistance() throws Exception { 16 * 1024 * 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -647,7 +647,7 @@ public void testMaxBlockSize() throws Exception { 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); CachedItem [] small = generateFixedBlocks(15, 600, "small"); @@ -691,7 +691,7 @@ public void testResizeBlockCache() throws Exception { 16 * 1024 * 1024, 10, 500, - 0.01); + 0.01f); CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -856,7 +856,7 @@ public void testCacheBlockNextBlockMetadataMissing() { 1024, 10, 500, - 0.01); + 0.01f); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -1048,7 +1048,7 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { false, 1024, 10, 500, - 0.01); + 0.01f); testMultiThreadGetAndEvictBlockInternal(cache); } @@ -1070,7 +1070,7 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E maxSize, heavyEvictionCountLimit, 200, - 0.01); + 0.01f); EvictionThread evictionThread = cache.getEvictionThread(); assertTrue(evictionThread != null); From 05117038472763c6f817d306f5baf8e02c65ebc7 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 21:00:29 +0300 Subject: [PATCH 713/769] Update LruBlockCache.java fixed codestyle --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 5f2492bc65c6..1698b62237e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -160,7 +160,7 @@ public class LruBlockCache implements FirstLevelBlockCache { private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = "hbase.lru.cache.heavy.eviction.mb.size.limit"; private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; - + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; @@ -335,7 +335,7 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, + boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, float heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; @@ -405,7 +405,7 @@ public void setMaxSize(long maxSize) { runEviction(); } } - + @VisibleForTesting public int getCacheDataBlockPercent() { return cacheDataBlockPercent; @@ -1027,7 +1027,8 @@ public void run() { if (stopTime - startTime <= 1000 * 10 - 1) { mbFreedSum += bytesFreed/1024/1024; } else { - freedDataOverheadPercent = (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; + freedDataOverheadPercent = + (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; if (mbFreedSum > cache.heavyEvictionMbSizeLimit) { heavyEvictionCount++; if (heavyEvictionCount > cache.heavyEvictionCountLimit) { @@ -1035,14 +1036,16 @@ public void run() { ch = ch > 15 ? 15 : ch; ch = ch < 0 ? 0 : ch; cache.cacheDataBlockPercent -= ch; - cache.cacheDataBlockPercent = cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; + cache.cacheDataBlockPercent = + cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; } } else { if (mbFreedSum >= cache.heavyEvictionMbSizeLimit * 0.1) { // It help avoid exit during short-term fluctuation int ch = (int) (-freedDataOverheadPercent * 0.1 + 1); cache.cacheDataBlockPercent += ch; - cache.cacheDataBlockPercent = cache.cacheDataBlockPercent > 100 ? 100 : cache.cacheDataBlockPercent; + cache.cacheDataBlockPercent = + cache.cacheDataBlockPercent > 100 ? 
100 : cache.cacheDataBlockPercent; } else { heavyEvictionCount = 0; cache.cacheDataBlockPercent = 100; From 69ea5efad98690355219ad97aeeea687f7781456 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sat, 6 Jun 2020 21:00:31 +0300 Subject: [PATCH 714/769] Update TestLruBlockCache.java fixed codestyle --- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 07b527f9e4bd..ee3593ec36f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1060,7 +1060,8 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E final LruBlockCache cache = new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.5f, // min 0.99f, // acceptable 0.33f, // single 0.33f, // multi @@ -1082,8 +1083,10 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); cache.cacheBlock(block.cacheKey, block, false); - if (cache.getCacheDataBlockPercent() < 70) // enough for test + if (cache.getCacheDataBlockPercent() < 70) { + // enough for test break; + } } evictionThread.evict(); @@ -1101,8 +1104,9 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E assertTrue(cache.getCacheDataBlockPercent() == 100); int counter = 0; for (BlockCacheKey key : cache.getMapForTests().keySet()) { - if (key.getOffset() % 100 > 90) + if (key.getOffset() % 100 > 90) { counter++; + } } assertTrue(counter > 1000); } From e48d84f59c2547448fd65d4e5e7a8cc1e83f298f Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Sun, 7 Jun 2020 08:21:54 +0300 Subject: [PATCH 715/769] Update TestLruBlockCache.java --- .../org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index ee3593ec36f1..6f572bd1bff3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1051,7 +1051,7 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { 0.01f); testMultiThreadGetAndEvictBlockInternal(cache); } - + public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { long maxSize = 100000000; int numBlocks = 100000; @@ -1060,7 +1060,7 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E final LruBlockCache cache = new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 
0.5f, // min 0.99f, // acceptable 0.33f, // single From 3557dc37fd4e18f7f54b5f968d943ef7f27da53f Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 8 Jun 2020 11:07:56 +0300 Subject: [PATCH 716/769] Update LruBlockCache.java Fixed remarks --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 85 ++++++++++++++++--- 1 file changed, 72 insertions(+), 13 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 1698b62237e5..0cb632968fb2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -155,14 +155,18 @@ public class LruBlockCache implements FirstLevelBlockCache {
 private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = "hbase.lru.cache.heavy.eviction.count.limit";
- private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 10;
+ // The default value effectively disables the performance-improving feature,
+ // because 2147483647 eviction runs amount to about ~680 years (only after that would it kick in).
+ // We can set it to 0-10 and get the benefit right away.
+ // (see details https://issues.apache.org/jira/browse/HBASE-23887).
+ private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 2147483647;
 private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT - = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + = "hbase.lru.cache.heavy.eviction.mb.size.limit"; private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500;
 private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT - = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f;
 /** @@ -452,10 +456,13 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) {
 @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
- // Don't cache this DATA block when too many blocks evict
- // and if we have limit on percent of blocks to cache.
- // It is good for performance (HBASE-23887)
+ // Some data blocks will not be put into BlockCache when the eviction rate is too high.
+ // It is good for performance
+ // (see details: https://issues.apache.org/jira/browse/HBASE-23887)
+ // How it is calculated can be found inside the EvictionThread class.
 if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) {
+ // It works like a filter - blocks whose two last digits of offset are above
+ // the value calculated in the Eviction Thread are not put into BlockCache
 if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { return; } @@ -715,6 +722,11 @@ long getOverhead() {
 /** * Eviction method.
+ *
+ * Evict items in order of use, allowing deletion of items
+ * which haven't been used for the longest amount of time.
+ *
+ * @return how many bytes were freed
 */ long evict() { @@ -837,7 +849,7 @@ long evict() { stats.evict(); evictionInProgress = false; evictionLock.unlock(); - return bytesToFree; + return bytesFreed; } } @@ -1020,33 +1032,80 @@ public void run() { LruBlockCache cache = this.cache.get(); if (cache == null) break; bytesFreed = cache.evict();
+ /*
+ * Sometimes we read more data than can fit into BlockCache
+ * and it is the cause of a high rate of evictions.
+ * This in turn leads to heavy Garbage Collector work.
+ * So a lot of blocks are put into BlockCache but never read,
+ * while a lot of CPU resources are spent.
+ * Here we will analyze how many bytes were freed and decide
+ * whether the time has come to reduce the amount of cached blocks.
+ * It helps avoid putting too many blocks into BlockCache
+ * when evict() is very active, and saves CPU for other jobs.
+ * More details: https://issues.apache.org/jira/browse/HBASE-23887
+ */
+
+ // First of all we have to control how much time
+ // has passed since the previous evict() was launched.
+ // This should be almost the same time (+/- 10s)
+ // because we get comparable volumes of freed bytes each time.
+ // 10s because this is the default period to run evict() (see above this.wait)
 long stopTime = System.currentTimeMillis(); - // If heavy cleaning BlockCache control. - // It helps avoid put too many blocks into BlockCache - // when evict() works very active. if (stopTime - startTime <= 1000 * 10 - 1) { mbFreedSum += bytesFreed/1024/1024; } else {
+ // Here we have to work out what situation we have got.
+ // We have the limit "hbase.lru.cache.heavy.eviction.mb.size.limit"
+ // and can calculate the overhead relative to it.
+ // We will use this information to decide
+ // how to change the percent of cached blocks.
 freedDataOverheadPercent = (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; - if (mbFreedSum > cache.heavyEvictionMbSizeLimit) { + if (freedDataOverheadPercent > 100) {
+ // Now we are in a situation where we are above the limit,
+ // but maybe we are going to ignore it because it will end quite soon.
 heavyEvictionCount++; if (heavyEvictionCount > cache.heavyEvictionCountLimit) {
+ // It has been going on for a long time and we have to reduce caching
+ // blocks now. So we calculate here how many blocks we want to skip.
+ // It depends on:
+ // 1. Overhead - if the overhead is big we can be more aggressive
+ // in reducing the amount of cached blocks.
+ // 2. How fast we want to get the result. If we know that our
+ // heavy reading lasts a long time, we don't want to wait and can
+ // increase the coefficient and get good performance quite soon.
+ // But if we aren't sure, we can do it slowly and it could prevent
+ // a premature exit from this mode. So, when the coefficient is
+ // higher we can get better performance when heavy reading is stable.
+ // But when the read pattern is changing we can adjust to it and set
+ // the coefficient to a lower value.
 int ch = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient);
+ // But practice shows that reducing by 15% is quite enough.
+ // We are not greedy (it could lead to a premature exit).
 ch = ch > 15 ? 15 : ch; - ch = ch < 0 ? 0 : ch; + ch = ch < 0 ? 0 : ch; // I think it will never happen but check for sure
+ // So this is the key point, here we are reducing the % of cached blocks
 cache.cacheDataBlockPercent -= ch;
+ // If we go down too deep we have to stop here; 1% should remain anyway.
 cache.cacheDataBlockPercent = cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; } } else {
+ // Well, we have got overshooting.
+ // Maybe it is just a short-term fluctuation and we can stay in this mode.
+ // It helps avoid a premature exit during short-term fluctuations.
+ // If the overshooting is less than 90%, we will try to increase the percent of
+ // cached blocks and hope it is enough.
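+ // A walk-through with assumed numbers (purely illustrative, not part of the patch):
+ // with heavyEvictionMbSizeLimit = 500 and mbFreedSum = 200 we get
+ // freedDataOverheadPercent = 200 * 100 / 500 - 100 = -60,
+ // so below ch = (int) (-(-60) * 0.1 + 1) = 7 and the caching percent grows by 7.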
 if (mbFreedSum >= cache.heavyEvictionMbSizeLimit * 0.1) { - // It help avoid exit during short-term fluctuation + // Simple logic: more overshooting - more caching blocks (backpressure) int ch = (int) (-freedDataOverheadPercent * 0.1 + 1); cache.cacheDataBlockPercent += ch;
+ // But it can't be more than 100%, so check it.
 cache.cacheDataBlockPercent = cache.cacheDataBlockPercent > 100 ? 100 : cache.cacheDataBlockPercent; } else {
+ // Looks like heavy reading is over.
+ // Just exit from this mode.
 heavyEvictionCount = 0; cache.cacheDataBlockPercent = 100; } From ac006ba51dc6544242a69eb9a760161bbb71576d Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 8 Jun 2020 11:09:17 +0300 Subject: [PATCH 717/769] Update TestLruBlockCache.java Fixed indents --- .../hbase/io/hfile/TestLruBlockCache.java | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 6f572bd1bff3..b30575ebc55c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1059,19 +1059,19 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); final LruBlockCache cache = - new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.5f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - maxSize, - heavyEvictionCountLimit, - 200, - 0.01f); + new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.5f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + maxSize, + heavyEvictionCountLimit, + 200, + 0.01f); EvictionThread evictionThread = cache.getEvictionThread(); assertTrue(evictionThread != null); From 765e18c162cf592d6c225f05d7d71a0129e4300f Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Mon, 8 Jun 2020 12:49:03 +0300 Subject: [PATCH 718/769] Update LruBlockCache.java fixed bug --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1
 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 0aff194ae01d..4fb0073b86e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1061,7 +1061,7 @@ public void run() { // how to change the percent of cached blocks. freedDataOverheadPercent = (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; - if (freedDataOverheadPercent > 100) { + if (freedDataOverheadPercent > 0) { // Now we are in a situation where we are above the limit, // but maybe we are going to ignore it because it will end quite soon. heavyEvictionCount++; From 09387ee9ddc1bad604daaa60c0216ae50607eba1 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 9 Jun 2020 13:56:11 +0300 Subject: [PATCH 720/769] Update LruBlockCache.java corrected by review --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 42 +++++++++---------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 4fb0073b86e4..93479332d369 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -159,7 +159,7 @@ public class LruBlockCache implements FirstLevelBlockCache { // because 2147483647 eviction runs amount to about ~680 years (only after that would it kick in). // We can set it to 0-10 and get the benefit right away. // (see details https://issues.apache.org/jira/browse/HBASE-23887). - private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = 2147483647; + private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; @@ -1015,8 +1015,7 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { - long bytesFreed; - long mbFreedSum = 0; + long freedSumMb = 0; int heavyEvictionCount = 0; int freedDataOverheadPercent = 0; long startTime = System.currentTimeMillis(); @@ -1031,7 +1030,7 @@ public void run() { } LruBlockCache cache = this.cache.get(); if (cache == null) break; - bytesFreed = cache.evict(); + freedSumMb += cache.evict()/1024/1024; /* * Sometimes we read more data than can fit into BlockCache * and it is the cause of a high rate of evictions. * This in turn leads to heavy Garbage Collector work. @@ -1043,8 +1043,8 @@ public void run() { * when evict() is very active, and saves CPU for other jobs. * More details: https://issues.apache.org/jira/browse/HBASE-23887 */ - - // First of all we have to control how much time + + // First of all we have to control how much time // has passed since the previous evict() was launched. // This should be almost the same time (+/- 10s) // because we get comparable volumes of freed bytes each time. // 10s because this is the default period to run evict() (see above this.wait) long stopTime = System.currentTimeMillis(); - if (stopTime - startTime <= 1000 * 10 - 1) { - mbFreedSum += bytesFreed/1024/1024; - } else { + if ((stopTime - startTime) > 1000 * 10 - 1) { // Here we have to work out what situation we have got. // We have the limit "hbase.lru.cache.heavy.eviction.mb.size.limit" // and can calculate the overhead relative to it. // We will use this information to decide // how to change the percent of cached blocks.
 freedDataOverheadPercent = - (int) (mbFreedSum * 100 / cache.heavyEvictionMbSizeLimit) - 100; + (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; if (freedDataOverheadPercent > 0) { // Now we are in a situation where we are above the limit, // but maybe we are going to ignore it because it will end quite soon. @@ -1079,16 +1076,15 @@ public void run() { // higher we can get better performance when heavy reading is stable. // But when the read pattern is changing we can adjust to it and set // the coefficient to a lower value. - int ch = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + int change = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); // But practice shows that reducing by 15% is quite enough. // We are not greedy (it could lead to a premature exit). - ch = ch > 15 ? 15 : ch; - ch = ch < 0 ? 0 : ch; // I think it will never happen but check for sure + change = Math.min(15, change); + change = Math.max(0, change); // I think it will never happen but check for sure // So this is the key point, here we are reducing the % of cached blocks - cache.cacheDataBlockPercent -= ch; + cache.cacheDataBlockPercent -= change; // If we go down too deep we have to stop here; 1% should remain anyway. - cache.cacheDataBlockPercent = - cache.cacheDataBlockPercent < 1 ? 1 : cache.cacheDataBlockPercent; + cache.cacheDataBlockPercent = Math.max(1, cache.cacheDataBlockPercent); } } else { // Well, we have got overshooting. // Maybe it is just a short-term fluctuation and we can stay in this mode. // It helps avoid a premature exit during short-term fluctuations. // If the overshooting is less than 90%, we will try to increase the percent of // cached blocks and hope it is enough. - if (mbFreedSum >= cache.heavyEvictionMbSizeLimit * 0.1) { + if (freedSumMb >= cache.heavyEvictionMbSizeLimit * 0.1) { // Simple logic: more overshooting - more caching blocks (backpressure) - int ch = (int) (-freedDataOverheadPercent * 0.1 + 1); - cache.cacheDataBlockPercent += ch; + int change = (int) (-freedDataOverheadPercent * 0.1 + 1); + cache.cacheDataBlockPercent += change; // But it can't be more than 100%, so check it. - cache.cacheDataBlockPercent = - cache.cacheDataBlockPercent > 100 ? 100 : cache.cacheDataBlockPercent; + cache.cacheDataBlockPercent = + cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); } else { // Looks like heavy reading is over. // Just exit from this mode.
 @@ -1111,12 +1107,12 @@ public void run() { } } LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + - "heavy eviction counter: {}, " + - "current caching DataBlock (%): {}", - mbFreedSum, freedDataOverheadPercent, - heavyEvictionCount, cache.cacheDataBlockPercent); + "heavy eviction counter: {}, " + + "current caching DataBlock (%): {}", + freedSumMb, freedDataOverheadPercent, + heavyEvictionCount, cache.cacheDataBlockPercent); - mbFreedSum = 0; + freedSumMb = 0; startTime = stopTime; } } From 23bdbe7ab64af157f63d0ddf7f81578d93ae935b Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 9 Jun 2020 18:44:12 +0300 Subject: [PATCH 721/769] Update LruBlockCache.java fixed codestyle --- .../org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 93479332d369..3f9337dc059d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1076,7 +1076,8 @@ public void run() { // higher we can get better performance when heavy reading is stable. // But when the read pattern is changing we can adjust to it and set // the coefficient to a lower value. - int change = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + int change = + (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); // But practice shows that reducing by 15% is quite enough. // We are not greedy (it could lead to a premature exit). @@ -1097,8 +1098,7 @@ public void run() { int change = (int) (-freedDataOverheadPercent * 0.1 + 1); cache.cacheDataBlockPercent += change; // But it can't be more than 100%, so check it. - cache.cacheDataBlockPercent = - cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); + cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); } else { // Looks like heavy reading is over. // Just exit from this mode. From 1971a2c96078615591be6362125106903eaef88e Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Tue, 9 Jun 2020 18:47:36 +0300 Subject: [PATCH 722/769] Update LruBlockCache.java removed whitespaces --- .../hadoop/hbase/io/hfile/LruBlockCache.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 3f9337dc059d..6e02a9ef3f8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -461,7 +461,7 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) // (see details: https://issues.apache.org/jira/browse/HBASE-23887) // How it is calculated can be found inside the EvictionThread class.
 if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { - // It works like a filter - blocks whose two last digits of offset are above + // It works like a filter - blocks whose two last digits of offset are above // the value calculated in the Eviction Thread are not put into BlockCache if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { return; @@ -1031,7 +1031,7 @@ public void run() { LruBlockCache cache = this.cache.get(); if (cache == null) break; freedSumMb += cache.evict()/1024/1024; - /* + /* * Sometimes we read more data than can fit into BlockCache * and it is the cause of a high rate of evictions. * This in turn leads to heavy Garbage Collector work. @@ -1043,8 +1043,8 @@ public void run() { * when evict() is very active, and saves CPU for other jobs. * More details: https://issues.apache.org/jira/browse/HBASE-23887 */ - - // First of all we have to control how much time + + // First of all we have to control how much time // has passed since the previous evict() was launched. // This should be almost the same time (+/- 10s) // because we get comparable volumes of freed bytes each time. // 10s because this is the default period to run evict() (see above this.wait) @@ -1068,13 +1068,13 @@ public void run() { // It depends on: // 1. Overhead - if the overhead is big we can be more aggressive // in reducing the amount of cached blocks. - // 2. How fast we want to get the result. If we know that our - // heavy reading lasts a long time, we don't want to wait and can + // 2. How fast we want to get the result. If we know that our + // heavy reading lasts a long time, we don't want to wait and can // increase the coefficient and get good performance quite soon. - // But if we aren't sure, we can do it slowly and it could prevent - // a premature exit from this mode. So, when the coefficient is + // But if we aren't sure, we can do it slowly and it could prevent + // a premature exit from this mode. So, when the coefficient is // higher we can get better performance when heavy reading is stable. - // But when the read pattern is changing we can adjust to it and set + // But when the read pattern is changing we can adjust to it and set // the coefficient to a lower value. int change = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); @@ -1088,10 +1088,10 @@ public void run() { cache.cacheDataBlockPercent = Math.max(1, cache.cacheDataBlockPercent); } } else { - // Well, we have got overshooting. + // Well, we have got overshooting. // Maybe it is just a short-term fluctuation and we can stay in this mode. // It helps avoid a premature exit during short-term fluctuations. - // If the overshooting is less than 90%, we will try to increase the percent of + // If the overshooting is less than 90%, we will try to increase the percent of // cached blocks and hope it is enough. if (freedSumMb >= cache.heavyEvictionMbSizeLimit * 0.1) { // Simple logic: more overshooting - more caching blocks (backpressure) int change = (int) (-freedDataOverheadPercent * 0.1 + 1); cache.cacheDataBlockPercent += change; // But it can't be more than 100%, so check it. cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent); } else { // Looks like heavy reading is over. // Just exit from this mode.
 heavyEvictionCount = 0; cache.cacheDataBlockPercent = 100; From d887ba99784f39a1e9844d8d08a6bb3e5e775c02 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Wed, 10 Jun 2020 00:03:49 +0300 Subject: [PATCH 723/769] Update LruBlockCache.java fixed whitespaces --- .../org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 6e02a9ef3f8d..4bbe03f3a684 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1054,7 +1054,7 @@ public void run() { // Here we have to work out what situation we have got. // We have the limit "hbase.lru.cache.heavy.eviction.mb.size.limit" // and can calculate the overhead relative to it. - // We will use this information to decide + // We will use this information to decide // how to change the percent of cached blocks. freedDataOverheadPercent = (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; @@ -1065,7 +1065,7 @@ public void run() { if (heavyEvictionCount > cache.heavyEvictionCountLimit) { // It has been going on for a long time and we have to reduce caching // blocks now. So we calculate here how many blocks we want to skip. - // It depends on: + // It depends on: // 1. Overhead - if the overhead is big we can be more aggressive // in reducing the amount of cached blocks. // 2. How fast we want to get the result. If we know that our // heavy reading lasts a long time, we don't want to wait and can // increase the coefficient and get good performance quite soon. // But if we aren't sure, we can do it slowly and it could prevent // a premature exit from this mode. So, when the coefficient is // higher we can get better performance when heavy reading is stable. // But when the read pattern is changing we can adjust to it and set // the coefficient to a lower value. - int change = + int change = (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); // But practice shows that reducing by 15% is quite enough. // We are not greedy (it could lead to a premature exit).
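For orientation, the reduce-side arithmetic that patches 716-723 converge on can be restated as a standalone Java sketch. The numbers and the wrapper class below are assumptions for illustration only, not code from any patch: with the limit at 500 MB and 2000 MB freed within a 10-second window, the overhead is 2000 * 100 / 500 - 100 = 300, so:

    // Illustrative only: standalone re-statement of the reduce step (assumed values).
    public class HeavyEvictionMath {
      public static void main(String[] args) {
        int cacheDataBlockPercent = 100;    // assumed start: all data blocks cached
        int freedDataOverheadPercent = 300; // assumed: 2000 MB freed vs a 500 MB limit
        float coefficient = 0.01f;          // hbase.lru.cache.heavy.eviction.overhead.coefficient
        int change = (int) (freedDataOverheadPercent * coefficient); // 300 * 0.01 = 3
        change = Math.min(15, change);      // never reduce by more than 15% at once
        change = Math.max(0, change);
        cacheDataBlockPercent = Math.max(1, cacheDataBlockPercent - change);
        System.out.println(cacheDataBlockPercent); // prints 97
      }
    }

This matches the sample log line quoted in the AdaptiveLruBlockCache Javadoc introduced below ("current caching DataBlock (%): 97").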
From a3da481a480a828819ff3c2e1c8f2d2437571ff5 Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Wed, 10 Jun 2020 08:27:54 +0300 Subject: [PATCH 724/769] Update LruBlockCache.java fixed bug --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 4bbe03f3a684..5463d0487c84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1015,6 +1015,7 @@ public EvictionThread(LruBlockCache cache) { @Override public void run() { + enteringRun = true; long freedSumMb = 0; int heavyEvictionCount = 0; int freedDataOverheadPercent = 0; From 6b1f7c41f907f51c575bcda5da6c514fa042d06b Mon Sep 17 00:00:00 2001 From: pustota2009 <61382543+pustota2009@users.noreply.github.com> Date: Thu, 11 Jun 2020 10:18:42 +0300 Subject: [PATCH 725/769] Update LruBlockCache.java simplify --- .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 5463d0487c84..8377e9a6655b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1059,7 +1059,7 @@ public void run() { // how to change percent of caching blocks. freedDataOverheadPercent = (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; - if (freedDataOverheadPercent > 0) { + if (freedSumMb > cache.heavyEvictionMbSizeLimit) { // Now we are in the situation when we are above the limit // But maybe we are going to ignore it because it will end quite soon heavyEvictionCount++; From 1e7d0ea7ec8ad46ee6e31101d372b19a8418dfd4 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:06:52 +0300 Subject: [PATCH 726/769] Added AdaptiveLruBlockCache --- .../hbase/io/hfile/AdaptiveLruBlockCache.java | 1433 +++++++++++++++++ .../hbase/io/hfile/BlockCacheFactory.java | 2 + .../io/hfile/TestAdaptiveLruBlockCache.java | 1124 +++++++++++++ .../hbase/io/hfile/TestLruBlockCache.java | 299 ++-- 4 files changed, 2662 insertions(+), 196 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java new file mode 100644 index 000000000000..a57464bb1d6d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -0,0 +1,1433 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
 You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +import static java.util.Objects.requireNonNull; + +import java.lang.ref.WeakReference; +import java.util.EnumMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.PriorityQueue; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; +import org.apache.hbase.thirdparty.com.google.common.base.Objects; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * This implementation improves performance of the classical LRU cache by up to 3 times by reducing the amount of GC work. + *

    + * The classical block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an + * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a + * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} + * operations. + *

    + * Contains three levels of block priority to allow for scan-resistance and in-memory families + * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An + * in-memory column family is a column family that should be served from memory if possible): + * single-access, multiple-accesses, and in-memory priority. A block is added with an in-memory + * priority flag if {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptor#isInMemory()}, + * otherwise a block becomes a single access priority the first time it is read into this block + * cache. If a block is accessed again while in cache, it is marked as a multiple access priority + * block. This delineation of blocks is used to prevent scans from thrashing the cache adding a + * least-frequently-used element to the eviction algorithm. + *

    + * Each priority is given its own chunk of the total cache to ensure fairness during eviction. Each + * priority will retain close to its maximum size, however, if any priority is not using its entire + * chunk the others are able to grow beyond their chunk size. + *

    + * Instantiated at a minimum with the total size and average block size. All sizes are in bytes. The + * block size is not especially important as this cache is fully dynamic in its sizing of blocks. It + * is only used for pre-allocating data structures and in initial heap estimation of the map. + *

    + * The detailed constructor defines the sizes for the three priorities (they should total to the + * maximum size defined). It also sets the levels that trigger and control the eviction + * thread. + *

    + * The acceptable size is the cache size level which triggers the eviction process to + * start. It evicts enough blocks to get the size below the minimum size specified. + *

    + * Eviction happens in a separate thread and involves a single full-scan of the map. It determines + * how many bytes must be freed to reach the minimum size, and then while scanning determines the + * fewest least-recently-used blocks necessary from each of the three priorities (would be 3 times + * bytes to free). It then uses the priority chunk sizes to evict fairly according to the relative + * sizes and usage. + *

 + * The adaptive LRU cache speeds things up when we read much more data than can fit + * into BlockCache, which causes a high rate of evictions. That in turn leads to heavy + * Garbage Collector work: a lot of blocks are put into BlockCache but never read, while a lot + * of CPU resources are spent on cleaning. We can avoid this situation via the parameters below + * (a configuration sketch follows the parameter descriptions): + *

 + * hbase.lru.cache.heavy.eviction.count.limit - sets how many times the eviction process + * has to run before we start to avoid putting data into BlockCache. When set to 0 the feature + * starts working right at the beginning. But if the workload mixes occasional short reads of the + * same data with long-term heavy reading, we can separate them with this parameter. For example, + * if we know that our short reading used to take about 1 minute, we can set the parameter to + * about 10 and the feature will be enabled only for long-term massive reading (after ~100 + * seconds, given the 10-second eviction period). So when we use short reads and want all of them + * in the cache, we will have that (except for evictions, of course). When we use long-term heavy + * reading, the feature will be enabled after some time and bring better performance. + *

 + * hbase.lru.cache.heavy.eviction.mb.size.limit - sets how many megabytes per 10 seconds we + * consider desirable to put into BlockCache (and evict from it). The feature will try to reach + * this value and maintain it. Don't set it too small because that leads to a premature exit from + * this mode. For powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB, for an + * average system (~10 cores) 200-300 MB, and some weak systems (2-5 cores) may do well with + * 50-100 MB. + * How it works: we set the limit and after each ~10 seconds calculate how many bytes were freed: + * Overhead = Freed Bytes Sum (MB) * 100 / Limit (MB) - 100; + * For example, if we set the limit = 500 and 2000 MB were evicted, the overhead is: + * 2000 * 100 / 500 - 100 = 300% + * The feature is going to reduce the percent of cached data blocks and bring evicted bytes + * closer to 100% (500 MB). Some kind of auto-scaling. + * If fewer bytes were freed than the limit, the overhead is negative. + * For example, if 200 MB were freed: + * 200 * 100 / 500 - 100 = -60% + * The feature will then increase the percent of cached blocks, + * which again brings evicted bytes closer to 100% (500 MB). + * The current situation can be found in the RegionServer log: + * BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current caching + * DataBlock (%): 100 < no eviction, 100% of blocks are cached + * BlockCache evicted (MB): 2000, overhead (%): 300, heavy eviction counter: 1, current caching + * DataBlock (%): 97 < eviction began, caching blocks reduced by 3%. + * This helps tune your system and find out what value is better to set. Don't try to reach 0% + * overhead, it is impossible. An overhead of 50-100% is quite good, + * as it prevents a premature exit from this mode. + *

 + * hbase.lru.cache.heavy.eviction.overhead.coefficient - sets how fast we want to get the + * result. If we know that our reading is heavy for a long time, we don't want to wait and can + * increase the coefficient to get good performance sooner. But if we aren't sure, we can do it + * slowly, which could prevent a premature exit from this mode. So, when the coefficient is higher + * we get better performance while heavy reading is stable, but when the read pattern is changing + * we can adapt to it by setting the coefficient to a lower value. + * For example, if we set the coefficient = 0.01, the overhead (see above) will be + * multiplied by 0.01 and the result is the value by which the percent of cached blocks is + * reduced. For example, if the overhead = 300% and the coefficient = 0.01, + * then the percent of cached blocks will be reduced by 3%. + * Similar logic applies when the overhead is negative (overshooting). Maybe it is just a + * short-term fluctuation and we will try to stay in this mode. It helps avoid a premature exit + * during short-term fluctuations. Backpressure has simple logic: more overshooting - more cached + * blocks. + *

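 + * As a sketch, the feature could be switched on through the Configuration keys defined in + * this class (the concrete values below are illustrative assumptions, not recommendations): + * <pre> + *   conf.setInt("hbase.lru.cache.heavy.eviction.count.limit", 0); + *   conf.setLong("hbase.lru.cache.heavy.eviction.mb.size.limit", 500); + *   conf.setFloat("hbase.lru.cache.heavy.eviction.overhead.coefficient", 0.01f); + * </pre> + *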
 + * Find more information about improvement: https://issues.apache.org/jira/browse/HBASE-23887 + */ +@InterfaceAudience.Private +public class AdaptiveLruBlockCache implements FirstLevelBlockCache { + + private static final Logger LOG = LoggerFactory.getLogger(AdaptiveLruBlockCache.class); + + /** + * Percentage of total size that eviction will evict until; e.g. if set to .8, then we will keep + * evicting during an eviction run till the cache size is down to 80% of the total. + */ + private static final String LRU_MIN_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.min.factor"; + + /** + * Acceptable size of cache (no evictions if size < acceptable) + */ + private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = + "hbase.lru.blockcache.acceptable.factor"; + + /** + * Hard capacity limit of cache, will reject any put if size > this * acceptable + */ + static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = + "hbase.lru.blockcache.hard.capacity.limit.factor"; + private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.single.percentage"; + private static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.multi.percentage"; + private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = + "hbase.lru.blockcache.memory.percentage"; + + /** + * Configuration key to force data blocks (unless there are too many in-memory blocks) to be + * cached in memory for in-memory hfiles; unlike inMemory, which is a column-family + * configuration, inMemoryForceMode is a cluster-wide configuration + */ + private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = + "hbase.lru.rs.inmemoryforcemode"; + + /* Default Configuration Parameters */ + + /* Backing Concurrent Map Configuration */ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /* Eviction thresholds */ + private static final float DEFAULT_MIN_FACTOR = 0.95f; + static final float DEFAULT_ACCEPTABLE_FACTOR = 0.99f; + + /* Priority buckets */ + private static final float DEFAULT_SINGLE_FACTOR = 0.25f; + private static final float DEFAULT_MULTI_FACTOR = 0.50f; + private static final float DEFAULT_MEMORY_FACTOR = 0.25f; + + private static final float DEFAULT_HARD_CAPACITY_LIMIT_FACTOR = 1.2f; + + private static final boolean DEFAULT_IN_MEMORY_FORCE_MODE = false; + + /* Statistics thread */ + private static final int STAT_THREAD_PERIOD = 60 * 5; + private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; + private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; + + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT + = "hbase.lru.cache.heavy.eviction.count.limit"; + // The default value effectively disables the performance-improving feature, + // because 2147483647 eviction runs amount to about ~680 years (only after that would it kick in). + // We can set it to 0-10 and get the benefit right away. + // (see details https://issues.apache.org/jira/browse/HBASE-23887).
 + private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; + + private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT + = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; + + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT + = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; + + /** + * Defined the cache map as {@link ConcurrentHashMap} here, because in + * {@link AdaptiveLruBlockCache#getBlock}, we need to guarantee the atomicity of map#computeIfPresent + * (key, func). Besides, the func method must execute exactly once only when the key is present + * and under the lock context, otherwise the reference count will be messed up. Notice that the + * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + */ + private transient final ConcurrentHashMap<BlockCacheKey, LruCachedBlock> map; + + /** Eviction lock (locked when eviction in process) */ + private transient final ReentrantLock evictionLock = new ReentrantLock(true); + + private final long maxBlockSize; + + /** Volatile boolean to track if we are in an eviction process or not */ + private volatile boolean evictionInProgress = false; + + /** Eviction thread */ + private transient final EvictionThread evictionThread; + + /** Statistics thread schedule pool (for heavy debugging, could remove) */ + private transient final ScheduledExecutorService scheduleThreadPool = + Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setNameFormat("AdaptiveLruBlockCacheStatsExecutor").setDaemon(true).build()); + + /** Current size of cache */ + private final AtomicLong size; + + /** Current size of data blocks */ + private final LongAdder dataBlockSize; + + /** Current number of cached elements */ + private final AtomicLong elements; + + /** Current number of cached data block elements */ + private final LongAdder dataBlockElements; + + /** Cache access count (sequential ID) */ + private final AtomicLong count; + + /** hard capacity limit */ + private float hardCapacityLimitFactor; + + /** Cache statistics */ + private final CacheStats stats; + + /** Maximum allowable size of cache (block put if size > max, evict) */ + private long maxSize; + + /** Approximate block size */ + private long blockSize; + + /** Acceptable size of cache (no evictions if size < acceptable) */ + private float acceptableFactor; + + /** Minimum threshold of cache (when evicting, evict until size < min) */ + private float minFactor; + + /** Single access bucket size */ + private float singleFactor; + + /** Multiple access bucket size */ + private float multiFactor; + + /** In-memory bucket size */ + private float memoryFactor; + + /** Overhead of the structure itself */ + private long overhead; + + /** Whether in-memory hfile's data block has higher priority when evicting */ + private boolean forceInMemory; + + /** + * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an + * external cache as L2.
 + * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + */ + private transient BlockCache victimHandler = null; + + /** Percent of cached data blocks */ + private volatile int cacheDataBlockPercent; + + /** Limit on the number of eviction runs after which we start to avoid caching blocks */ + private final int heavyEvictionCountLimit; + + /** Limit on the evicted volume (MB) after which we start to avoid caching blocks */ + private final long heavyEvictionMbSizeLimit; + + /** Adjusts the auto-scaling via the overhead of the eviction rate */ + private final float heavyEvictionOverheadCoefficient; + + /** + * Default constructor. Specify maximum size and expected average block + * size (approximation is fine). + * + *

    All other factors will be calculated based on defaults specified in + * this class. + * + * @param maxSize maximum size of cache, in bytes + * @param blockSize approximate size of each block, in bytes + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize) { + this(maxSize, blockSize, true); + } + + /** + * Constructor used for testing. Allows disabling of the eviction thread. + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread) { + this(maxSize, blockSize, evictionThread, + (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, + DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, + DEFAULT_SINGLE_FACTOR, + DEFAULT_MULTI_FACTOR, + DEFAULT_MEMORY_FACTOR, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, + false, + DEFAULT_MAX_BLOCK_SIZE, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); + } + + public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { + this(maxSize, blockSize, evictionThread, + (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, + DEFAULT_CONCURRENCY_LEVEL, + conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), + conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), + conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), + conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), + conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), + conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), + conf.getFloat(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); + } + + public AdaptiveLruBlockCache(long maxSize, long blockSize, Configuration conf) { + this(maxSize, blockSize, true, conf); + } + + /** + * Configurable constructor. Use this constructor if not using defaults. 
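 + * <p> + * An illustrative direct invocation, mirroring how the tests in this series build the cache + * (all of the numbers below are example values only, not recommended settings): + * <pre> + *   AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + *       (int) Math.ceil(1.2 * maxSize / blockSize), DEFAULT_LOAD_FACTOR, + *       DEFAULT_CONCURRENCY_LEVEL, 0.5f, 0.99f, 0.33f, 0.33f, 0.34f, 1.2f, false, + *       16 * 1024 * 1024, 10, 500, 0.01f); + * </pre>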
 + * + * @param maxSize maximum size of this cache, in bytes + * @param blockSize expected average size of blocks, in bytes + * @param evictionThread whether to run evictions in a bg thread or not + * @param mapInitialSize initial size of backing ConcurrentHashMap + * @param mapLoadFactor initial load factor of backing ConcurrentHashMap + * @param mapConcurrencyLevel initial concurrency factor for backing CHM + * @param minFactor percentage of total size that eviction will evict until + * @param acceptableFactor percentage of total size that triggers eviction + * @param singleFactor percentage of total size for single-access blocks + * @param multiFactor percentage of total size for multiple-access blocks + * @param memoryFactor percentage of total size for in-memory blocks + */ + public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread, + int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, + float minFactor, float acceptableFactor, float singleFactor, + float multiFactor, float memoryFactor, float hardLimitFactor, + boolean forceInMemory, long maxBlockSize, + int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, + float heavyEvictionOverheadCoefficient) { + this.maxBlockSize = maxBlockSize; + if(singleFactor + multiFactor + memoryFactor != 1 || + singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { + throw new IllegalArgumentException("Single, multi, and memory factors " + + " should be non-negative and total 1.0"); + } + if (minFactor >= acceptableFactor) { + throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); + } + if (minFactor >= 1.0f || acceptableFactor >= 1.0f) { + throw new IllegalArgumentException("all factors must be < 1"); + } + this.maxSize = maxSize; + this.blockSize = blockSize; + this.forceInMemory = forceInMemory; + map = new ConcurrentHashMap<>(mapInitialSize, mapLoadFactor, mapConcurrencyLevel); + this.minFactor = minFactor; + this.acceptableFactor = acceptableFactor; + this.singleFactor = singleFactor; + this.multiFactor = multiFactor; + this.memoryFactor = memoryFactor; + this.stats = new CacheStats(this.getClass().getSimpleName()); + this.count = new AtomicLong(0); + this.elements = new AtomicLong(0); + this.dataBlockElements = new LongAdder(); + this.dataBlockSize = new LongAdder(); + this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel); + this.size = new AtomicLong(this.overhead); + this.hardCapacityLimitFactor = hardLimitFactor; + if (evictionThread) { + this.evictionThread = new EvictionThread(this); + this.evictionThread.start(); // FindBugs SC_START_IN_CTOR + } else { + this.evictionThread = null; + } + + // check the bounds + this.heavyEvictionCountLimit = heavyEvictionCountLimit < 0 ? 0 : heavyEvictionCountLimit; + this.heavyEvictionMbSizeLimit = heavyEvictionMbSizeLimit < 1 ? 1 : heavyEvictionMbSizeLimit; + this.cacheDataBlockPercent = 100; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient > 1f + ? 1f : heavyEvictionOverheadCoefficient; + heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient < 0.001f + ? 0.001f : heavyEvictionOverheadCoefficient; + this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; + + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // every five minutes.
 + this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, + STAT_THREAD_PERIOD, TimeUnit.SECONDS); + } + + @Override + public void setVictimCache(BlockCache victimCache) { + if (victimHandler != null) { + throw new IllegalArgumentException("The victim cache has already been set"); + } + victimHandler = requireNonNull(victimCache); + } + + @Override + public void setMaxSize(long maxSize) { + this.maxSize = maxSize; + if (this.size.get() > acceptableSize() && !evictionInProgress) { + runEviction(); + } + } + + public int getCacheDataBlockPercent() { + return cacheDataBlockPercent; + } + + /** + * The block cached in AdaptiveLruBlockCache will always be a heap block: on the one side, heap + * access is faster than off-heap, so the small index block or meta block cached in + * CombinedBlockCache will benefit a lot. On the other side, the AdaptiveLruBlockCache size is always + * calculated based on the total heap size, so if we cached an off-heap block in AdaptiveLruBlockCache, the + * heap size would be messed up. Here we will clone the block into a heap block if it's an + * off-heap block, otherwise just use the original block. The key point is to maintain the refCnt of + * the block (HBASE-22127):
 + * 1. if we cache the cloned heap block, its refCnt is a totally new one, it's easy to handle;
 + * 2. if we cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's + * reservoir; if both RPC and AdaptiveLruBlockCache release the block, then it can be garbage collected by + * the JVM, so we need a retain here. + * @param buf the original block + * @return a block with a heap memory backend. + */ + private Cacheable asReferencedHeapBlock(Cacheable buf) { + if (buf instanceof HFileBlock) { + HFileBlock blk = ((HFileBlock) buf); + if (blk.isSharedMem()) { + return HFileBlock.deepCloneOnHeap(blk); + } + } + // The block will be referenced by this AdaptiveLruBlockCache, so should increase its refCnt here. + return buf.retain(); + } + + // BlockCache implementation + + /** + * Cache the block with the specified name and buffer. + *

 + * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) + * this can happen, for which we compare the buffer contents. + * + * @param cacheKey block's cache key + * @param buf block buffer + * @param inMemory if block is in-memory + */ + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { + + // Some data blocks will not be put into BlockCache when the eviction rate is too high. + // It is good for performance + // (see details: https://issues.apache.org/jira/browse/HBASE-23887) + // How it is calculated can be found inside the EvictionThread class. + if (cacheDataBlockPercent != 100 && buf.getBlockType().isData()) { + // It works like a filter - blocks whose two last digits of offset are above + // the value calculated in the Eviction Thread are not put into BlockCache + if (cacheKey.getOffset() % 100 >= cacheDataBlockPercent) { + return; + } + } + + if (buf.heapSize() > maxBlockSize) { + // If there are a lot of blocks that are too + // big this can make the logs way too noisy. + // So we log 2% + if (stats.failInsert() % 50 == 0) { + LOG.warn("Trying to cache too large a block " + + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + + " is " + buf.heapSize() + + " which is larger than " + maxBlockSize); + } + return; + } + + LruCachedBlock cb = map.get(cacheKey); + if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, buf)) { + return; + } + long currentSize = size.get(); + long currentAcceptableSize = acceptableSize(); + long hardLimitSize = (long) (hardCapacityLimitFactor * currentAcceptableSize); + if (currentSize >= hardLimitSize) { + stats.failInsert(); + if (LOG.isTraceEnabled()) { + LOG.trace("AdaptiveLruBlockCache current size " + StringUtils.byteDesc(currentSize) + + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "." + + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize) + + ", failed to put cacheKey:" + cacheKey + " into AdaptiveLruBlockCache."); + } + if (!evictionInProgress) { + runEviction(); + } + return; + } + // Ensure that the block is a heap one. + buf = asReferencedHeapBlock(buf); + cb = new LruCachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory); + long newSize = updateSizeMetrics(cb, false); + map.put(cacheKey, cb); + long val = elements.incrementAndGet(); + if (buf.getBlockType().isData()) { + dataBlockElements.increment(); + } + if (LOG.isTraceEnabled()) { + long size = map.size(); + assertCounterSanity(size, val); + } + if (newSize > currentAcceptableSize && !evictionInProgress) { + runEviction(); + } + } + + /** + * Sanity-checking for parity between actual block cache content and metrics. + * Intended only for use with TRACE level logging and -ea JVM. + */ + private static void assertCounterSanity(long mapSize, long counterVal) { + if (counterVal < 0) { + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); + return; + } + if (mapSize < Integer.MAX_VALUE) { + double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); + if (pct_diff > 0.05) { + LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + + ", mapSize=" + mapSize); + } + } + } + + /** + * Cache the block with the specified name and buffer. + *

 + * TODO after HBASE-22005, we may cache a block which was allocated off-heap, but our LRU cache + * sizing is based on heap size, so we should handle this in HBASE-22127. It will introduce a + * switch for whether to make the LRU on-heap or not; if so we may need to copy the memory to + * on-heap, otherwise the caching size is based on off-heap. + * @param cacheKey block's cache key + * @param buf block buffer + */ + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { + cacheBlock(cacheKey, buf, false); + } + + /** + * Helper function that updates the local size counter and also updates any + * per-cf or per-blocktype metrics it can discern from given + * {@link LruCachedBlock} + */ + private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { + long heapsize = cb.heapSize(); + BlockType bt = cb.getBuffer().getBlockType(); + if (evict) { + heapsize *= -1; + } + if (bt != null && bt.isData()) { + dataBlockSize.add(heapsize); + } + return size.addAndGet(heapsize); + } + + /** + * Get the buffer of the block with the specified name. + * + * @param cacheKey block's cache key + * @param caching true if the caller caches blocks on cache misses + * @param repeat Whether this is a repeat lookup for the same block + * (used to avoid double counting cache misses when doing double-check + * locking) + * @param updateCacheMetrics Whether to update cache metrics or not + * + * @return buffer of specified cache key, or null if not in cache + */ + @Override + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { + LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> { + // It will be referenced by the RPC path, so increase here. NOTICE: Must do the retain inside + // this block, because if we retain outside the map#computeIfPresent, the evictBlock may remove + // the block and release it, and then we're retaining a block with refCnt=0 which is disallowed. + // see HBASE-22422. + val.getBuffer().retain(); + return val; + }); + if (cb == null) { + if (!repeat && updateCacheMetrics) { + stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + } + // If there is another block cache then try and read there. + // However if this is a retry ( second time in double checked locking ) + // And it's already a miss then the l2 will also be a miss. + if (victimHandler != null && !repeat) { + // The handler will increase result's refCnt for RPC, so need no extra retain. + Cacheable result = victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + // Promote this to L1. + if (result != null) { + if (caching) { + cacheBlock(cacheKey, result, /* inMemory = */ false); + } + } + return result; + } + return null; + } + if (updateCacheMetrics) { + stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + } + cb.access(count.incrementAndGet()); + return cb.getBuffer(); + } + + /** + * Whether the cache contains block with specified cacheKey + * + * @return true if contains the block + */ + @Override + public boolean containsBlock(BlockCacheKey cacheKey) { + return map.containsKey(cacheKey); + } + + @Override + public boolean evictBlock(BlockCacheKey cacheKey) { + LruCachedBlock cb = map.get(cacheKey); + return cb != null && evictBlock(cb, false) > 0; + } + + /** + * Evicts all blocks for a specific HFile. This is an + * expensive operation implemented as a linear-time search through all blocks + * in the cache. Ideally this should be a search in a log-access-time map. + * + *

+
+  /**
+   * Whether the cache contains the block with the specified cacheKey.
+   *
+   * @return true if it contains the block
+   */
+  @Override
+  public boolean containsBlock(BlockCacheKey cacheKey) {
+    return map.containsKey(cacheKey);
+  }
+
+  @Override
+  public boolean evictBlock(BlockCacheKey cacheKey) {
+    LruCachedBlock cb = map.get(cacheKey);
+    return cb != null && evictBlock(cb, false) > 0;
+  }
+
+  /**
+   * Evicts all blocks for a specific HFile. This is an
+   * expensive operation implemented as a linear-time search through all blocks
+   * in the cache. Ideally this should be a search in a log-access-time map.
+   *
+   * <p>
+   * This is used for evict-on-close to remove all blocks of a specific HFile.
+   *
+   * @return the number of blocks evicted
+   */
+  @Override
+  public int evictBlocksByHfileName(String hfileName) {
+    int numEvicted = 0;
+    for (BlockCacheKey key : map.keySet()) {
+      if (key.getHfileName().equals(hfileName)) {
+        if (evictBlock(key)) {
+          ++numEvicted;
+        }
+      }
+    }
+    if (victimHandler != null) {
+      numEvicted += victimHandler.evictBlocksByHfileName(hfileName);
+    }
+    return numEvicted;
+  }
+
+  /**
+   * Evict the block; it will be cached by the victim handler if one exists and
+   * the block may be read again later.
+   *
+   * @param evictedByEvictionProcess true if the given block is evicted by
+   *          EvictionThread
+   * @return the heap size of evicted block
+   */
+  protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) {
+    LruCachedBlock previous = map.remove(block.getCacheKey());
+    if (previous == null) {
+      return 0;
+    }
+    updateSizeMetrics(block, true);
+    long val = elements.decrementAndGet();
+    if (LOG.isTraceEnabled()) {
+      long size = map.size();
+      assertCounterSanity(size, val);
+    }
+    if (block.getBuffer().getBlockType().isData()) {
+      dataBlockElements.decrement();
+    }
+    if (evictedByEvictionProcess) {
+      // Update the stats counter only when the block is evicted by the eviction process; when
+      // the eviction happened because of invalidation of HFiles, there is no need to update it.
+      stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary());
+      if (victimHandler != null) {
+        victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer());
+      }
+    }
+    // Decrease the block's reference count, and if refCount is 0, then it'll auto-deallocate. DO
+    // NOT move this up, because if we do, the victimHandler may access the buffer with
+    // refCnt = 0, which is disallowed.
+    previous.getBuffer().release();
+    return block.heapSize();
+  }
+
+  /**
+   * Multi-threaded call to run the eviction process.
+   */
+  private void runEviction() {
+    if (evictionThread == null) {
+      evict();
+    } else {
+      evictionThread.evict();
+    }
+  }
+
+  boolean isEvictionInProgress() {
+    return evictionInProgress;
+  }
+
+  long getOverhead() {
+    return overhead;
+  }
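+
+  // Illustration of the eviction watermarks used below (example numbers only):
+  // with maxSize = 1 GB, acceptableFactor = 0.99 and minFactor = 0.95, caching
+  // beyond acceptableSize() (~0.99 GB) triggers an eviction run, and evict()
+  // then frees currentSize - minSize(), bringing usage back down to ~0.95 GB.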
+
+  /**
+   * Eviction method.
+   *
+   * Evicts items in order of least recent use, deleting the items
+   * which haven't been used for the longest amount of time first.
+   *
+   * @return how many bytes were freed
+   */
+  long evict() {
+
+    // Ensure only one eviction at a time
+    if (!evictionLock.tryLock()) {
+      return 0;
+    }
+
+    long bytesToFree = 0L;
+
+    try {
+      evictionInProgress = true;
+      long currentSize = this.size.get();
+      bytesToFree = currentSize - minSize();
+
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Block cache LRU eviction started; Attempting to free "
+            + StringUtils.byteDesc(bytesToFree) + " of total="
+            + StringUtils.byteDesc(currentSize));
+      }
+
+      if (bytesToFree <= 0) {
+        return 0;
+      }
+
+      // Instantiate priority buckets
+      BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize());
+      BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize());
+      BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize());
+
+      // Scan entire map putting into appropriate buckets
+      for (LruCachedBlock cachedBlock : map.values()) {
+        switch (cachedBlock.getPriority()) {
+          case SINGLE: {
+            bucketSingle.add(cachedBlock);
+            break;
+          }
+          case MULTI: {
+            bucketMulti.add(cachedBlock);
+            break;
+          }
+          case MEMORY: {
+            bucketMemory.add(cachedBlock);
+            break;
+          }
+        }
+      }
+
+      long bytesFreed = 0;
+      if (forceInMemory || memoryFactor > 0.999f) {
+        long s = bucketSingle.totalSize();
+        long m = bucketMulti.totalSize();
+        if (bytesToFree > (s + m)) {
+          // this means we need to evict blocks in the memory bucket to make room,
+          // so the single and multi buckets will be emptied
+          bytesFreed = bucketSingle.free(s);
+          bytesFreed += bucketMulti.free(m);
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("freed " + StringUtils.byteDesc(bytesFreed)
+                + " from single and multi buckets");
+          }
+          bytesFreed += bucketMemory.free(bytesToFree - bytesFreed);
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("freed " + StringUtils.byteDesc(bytesFreed)
+                + " total from all three buckets ");
+          }
+        } else {
+          // this means there is no need to evict blocks in the memory bucket, and we try
+          // our best to make the ratio between the single bucket and the multi bucket 1:2
+          long bytesRemain = s + m - bytesToFree;
+          if (3 * s <= bytesRemain) {
+            // the single bucket is small enough that no eviction happens for it,
+            // hence all eviction goes from the multi bucket
+            bytesFreed = bucketMulti.free(bytesToFree);
+          } else if (3 * m <= 2 * bytesRemain) {
+            // the multi bucket is small enough that no eviction happens for it,
+            // hence all eviction goes from the single bucket
+            bytesFreed = bucketSingle.free(bytesToFree);
+          } else {
+            // both buckets need to evict some blocks
+            bytesFreed = bucketSingle.free(s - bytesRemain / 3);
+            if (bytesFreed < bytesToFree) {
+              bytesFreed += bucketMulti.free(bytesToFree - bytesFreed);
+            }
+          }
+        }
+      } else {
+        PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<>(3);
+
+        bucketQueue.add(bucketSingle);
+        bucketQueue.add(bucketMulti);
+        bucketQueue.add(bucketMemory);
+
+        int remainingBuckets = bucketQueue.size();
+
+        BlockBucket bucket;
+        while ((bucket = bucketQueue.poll()) != null) {
+          long overflow = bucket.overflow();
+          if (overflow > 0) {
+            long bucketBytesToFree =
+                Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
+            bytesFreed += bucket.free(bucketBytesToFree);
+          }
+          remainingBuckets--;
+        }
+      }
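+      // Worked example of the 1:2 balancing in the in-memory-force branch above
+      // (illustration only): with s = 4000, m = 5000 and bytesToFree = 3000,
+      // bytesRemain = 6000. Since 3*s = 12000 > 6000 and 3*m = 15000 > 2*6000,
+      // both buckets evict: single frees s - bytesRemain/3 = 4000 - 2000 = 2000
+      // and multi frees the remaining 1000, leaving single:multi = 2000:4000 = 1:2.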
"single=" + StringUtils.byteDesc(single) + ", " + + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); + } + } finally { + stats.evict(); + evictionInProgress = false; + evictionLock.unlock(); + return bytesToFree; + } + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("blockCount", getBlockCount()) + .add("currentSize", StringUtils.byteDesc(getCurrentSize())) + .add("freeSize", StringUtils.byteDesc(getFreeSize())) + .add("maxSize", StringUtils.byteDesc(getMaxSize())) + .add("heapSize", StringUtils.byteDesc(heapSize())) + .add("minSize", StringUtils.byteDesc(minSize())) + .add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())) + .add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())) + .add("singleFactor", singleFactor) + .toString(); + } + + /** + * Used to group blocks into priority buckets. There will be a BlockBucket + * for each priority (single, multi, memory). Once bucketed, the eviction + * algorithm takes the appropriate number of elements out of each according + * to configuration parameters and their relatives sizes. + */ + private class BlockBucket implements Comparable { + + private final String name; + private LruCachedBlockQueue queue; + private long totalSize = 0; + private long bucketSize; + + public BlockBucket(String name, long bytesToFree, long blockSize, long bucketSize) { + this.name = name; + this.bucketSize = bucketSize; + queue = new LruCachedBlockQueue(bytesToFree, blockSize); + totalSize = 0; + } + + public void add(LruCachedBlock block) { + totalSize += block.heapSize(); + queue.add(block); + } + + public long free(long toFree) { + if (LOG.isTraceEnabled()) { + LOG.trace("freeing " + StringUtils.byteDesc(toFree) + " from " + this); + } + LruCachedBlock cb; + long freedBytes = 0; + while ((cb = queue.pollLast()) != null) { + freedBytes += evictBlock(cb, true); + if (freedBytes >= toFree) { + return freedBytes; + } + } + if (LOG.isTraceEnabled()) { + LOG.trace("freed " + StringUtils.byteDesc(freedBytes) + " from " + this); + } + return freedBytes; + } + + public long overflow() { + return totalSize - bucketSize; + } + + public long totalSize() { + return totalSize; + } + + @Override + public int compareTo(BlockBucket that) { + return Long.compare(this.overflow(), that.overflow()); + } + + @Override + public boolean equals(Object that) { + if (that == null || !(that instanceof BlockBucket)) { + return false; + } + return compareTo((BlockBucket)that) == 0; + } + + @Override + public int hashCode() { + return Objects.hashCode(name, bucketSize, queue, totalSize); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", name) + .add("totalSize", StringUtils.byteDesc(totalSize)) + .add("bucketSize", StringUtils.byteDesc(bucketSize)) + .toString(); + } + } + + /** + * Get the maximum size of this cache. 
+ * + * @return max size in bytes + */ + + @Override + public long getMaxSize() { + return this.maxSize; + } + + @Override + public long getCurrentSize() { + return this.size.get(); + } + + @Override + public long getCurrentDataSize() { + return this.dataBlockSize.sum(); + } + + @Override + public long getFreeSize() { + return getMaxSize() - getCurrentSize(); + } + + @Override + public long size() { + return getMaxSize(); + } + + @Override + public long getBlockCount() { + return this.elements.get(); + } + + @Override + public long getDataBlockCount() { + return this.dataBlockElements.sum(); + } + + EvictionThread getEvictionThread() { + return this.evictionThread; + } + + /* + * Eviction thread. Sits in waiting state until an eviction is triggered + * when the cache size grows above the acceptable level.

+   *
+   * Thread is triggered into action by {@link AdaptiveLruBlockCache#runEviction()}
+   */
+  static class EvictionThread extends Thread {
+
+    private WeakReference<AdaptiveLruBlockCache> cache;
+    private volatile boolean go = true;
+    // flag set after entering the run method, used for tests
+    private boolean enteringRun = false;
+
+    public EvictionThread(AdaptiveLruBlockCache cache) {
+      super(Thread.currentThread().getName() + ".AdaptiveLruBlockCache.EvictionThread");
+      setDaemon(true);
+      this.cache = new WeakReference<>(cache);
+    }
+
+    @Override
+    public void run() {
+      enteringRun = true;
+      long freedSumMb = 0;
+      int heavyEvictionCount = 0;
+      int freedDataOverheadPercent = 0;
+      long startTime = System.currentTimeMillis();
+      while (this.go) {
+        synchronized (this) {
+          try {
+            this.wait(1000 * 10 /* Don't wait forever */);
+          } catch (InterruptedException e) {
+            LOG.warn("Interrupted eviction thread ", e);
+            Thread.currentThread().interrupt();
+          }
+        }
+        AdaptiveLruBlockCache cache = this.cache.get();
+        if (cache == null) {
+          break;
+        }
+        freedSumMb += cache.evict() / 1024 / 1024;
+        /*
+         * Sometimes we read more data than can fit into the BlockCache, which causes a high
+         * rate of evictions. This in turn leads to heavy Garbage Collector work. As a result,
+         * a lot of blocks are put into the BlockCache but never read, wasting a lot of CPU.
+         * Here we analyze how many bytes were freed and decide whether the time has come to
+         * reduce the number of blocks we cache. This helps avoid putting too many blocks into
+         * the BlockCache while evict() is running very actively, and saves CPU for other jobs.
+         * More details: https://issues.apache.org/jira/browse/HBASE-23887
+         */
+
+        // First of all we have to track how much time has passed since the previous
+        // evict() was launched. It should be almost the same interval (+/- 10s) each
+        // time, so that we compare comparable volumes of freed bytes.
+        // 10s because that is the default period between evict() runs (see this.wait above).
+        long stopTime = System.currentTimeMillis();
+        if ((stopTime - startTime) > 1000 * 10 - 1) {
+          // Here we have to work out which situation we are in.
+          // We have the limit "hbase.lru.cache.heavy.eviction.bytes.size.limit"
+          // and can calculate the overhead relative to it.
+          // We will use this information to decide
+          // how to change the percent of cached blocks.
+          freedDataOverheadPercent =
+              (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100;
+          if (freedSumMb > cache.heavyEvictionMbSizeLimit) {
+            // Now we are above the limit,
+            // but maybe we should ignore it because the heavy reading may end quite soon.
+            heavyEvictionCount++;
+            if (heavyEvictionCount > cache.heavyEvictionCountLimit) {
+              // It has been going on for a long time, so we have to reduce the number of
+              // cached blocks now. Here we calculate how many blocks we want to skip.
+              // It depends on:
+              // 1. The overhead - if the overhead is big, we can reduce the amount of
+              // cached blocks more aggressively.
+              // 2. How fast we want to see the result. If we know our heavy reading will
+              // last a long time, we don't want to wait; we can increase the coefficient
+              // and get good performance quite soon. But if we are not sure, we can do it
+              // slowly, which helps prevent a premature exit from this mode. So with a
+              // higher coefficient we get better performance when heavy reading is stable,
+              // while with a lower coefficient we can adjust to changing read patterns.
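+              // Worked example (illustration only): suppose heavyEvictionMbSizeLimit
+              // is 500 and evict() freed 700 MB over the last period. Then
+              // freedDataOverheadPercent = 700 * 100 / 500 - 100 = 40. With an
+              // overhead coefficient of 0.1 (assumed for this example) the raw step
+              // is 40 * 0.1 = 4, so cacheDataBlockPercent drops by 4 points (the
+              // step is capped at 15 per period and the percent is floored at 1).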
+              int change =
+                  (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient);
+              // But practice shows that reducing by 15% at most is quite enough.
+              // We are not greedy (greed could lead to a premature exit).
+              change = Math.min(15, change);
+              change = Math.max(0, change); // should never happen, but check to be sure
+              // So this is the key point: here we reduce the % of cached blocks.
+              cache.cacheDataBlockPercent -= change;
+              // If we go down too deep we have to stop here; at least 1% should always remain.
+              cache.cacheDataBlockPercent = Math.max(1, cache.cacheDataBlockPercent);
+            }
+          } else {
+            // We are under the limit now (we overshot downward).
+            // Maybe it is just a short-term fluctuation and we can stay in this mode;
+            // that helps avoid a premature exit during short-term fluctuations.
+            // If we still freed at least 10% of the limit, we try to increase the
+            // percent of cached blocks and hope it is enough.
+            if (freedSumMb >= cache.heavyEvictionMbSizeLimit * 0.1) {
+              // Simple logic: the bigger the undershoot, the more blocks we cache again
+              // (backpressure).
+              int change = (int) (-freedDataOverheadPercent * 0.1 + 1);
+              cache.cacheDataBlockPercent += change;
+              // But it can't be more than 100%, so check it.
+              cache.cacheDataBlockPercent = Math.min(100, cache.cacheDataBlockPercent);
+            } else {
+              // Looks like the heavy reading is over.
+              // Just exit from this mode.
+              heavyEvictionCount = 0;
+              cache.cacheDataBlockPercent = 100;
+            }
+          }
+          LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, "
+              + "heavy eviction counter: {}, "
+              + "current caching DataBlock (%): {}",
+              freedSumMb, freedDataOverheadPercent,
+              heavyEvictionCount, cache.cacheDataBlockPercent);
+
+          freedSumMb = 0;
+          startTime = stopTime;
+        }
+      }
+    }
+
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY",
+        justification = "This is what we want")
+    public void evict() {
+      synchronized (this) {
+        this.notifyAll();
+      }
+    }
+
+    synchronized void shutdown() {
+      this.go = false;
+      this.notifyAll();
+    }
+
+    /**
+     * Used for tests.
+     */
+    boolean isEnteringRun() {
+      return this.enteringRun;
+    }
+  }
+
+  /*
+   * Statistics thread. Periodically prints the cache statistics to the log.
+   */
+  static class StatisticsThread extends Thread {
+
+    private final AdaptiveLruBlockCache lru;
+
+    public StatisticsThread(AdaptiveLruBlockCache lru) {
+      super("AdaptiveLruBlockCacheStats");
+      setDaemon(true);
+      this.lru = lru;
+    }
+
+    @Override
+    public void run() {
+      lru.logStats();
+    }
+  }
+
+  public void logStats() {
+    // Log size
+    long totalSize = heapSize();
+    long freeSize = maxSize - totalSize;
+    AdaptiveLruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", "
+        + "freeSize=" + StringUtils.byteDesc(freeSize) + ", "
+        + "max=" + StringUtils.byteDesc(this.maxSize) + ", "
+        + "blockCount=" + getBlockCount() + ", "
+        + "accesses=" + stats.getRequestCount() + ", "
+        + "hits=" + stats.getHitCount() + ", "
+        + "hitRatio=" + (stats.getHitCount() == 0 ? "0"
+            : StringUtils.formatPercent(stats.getHitRatio(), 2)) + ", "
+        + "cachingAccesses=" + stats.getRequestCachingCount() + ", "
+        + "cachingHits=" + stats.getHitCachingCount() + ", "
+        + "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? "0"
+            : StringUtils.formatPercent(stats.getHitCachingRatio(), 2)) + ", "
+        + "evictions=" + stats.getEvictionCount() + ", "
+        + "evicted=" + stats.getEvictedCount() + ", "
+        + "evictedPerRun=" + stats.evictedPerEviction());
+  }
+
+  /**
+   * Get counter statistics for this cache.
+   *

+   * Includes: total accesses, hits, misses, evicted blocks, and runs
+   * of the eviction processes.
+   */
+  @Override
+  public CacheStats getStats() {
+    return this.stats;
+  }
+
+  public final static long CACHE_FIXED_OVERHEAD =
+      ClassSize.estimateBase(AdaptiveLruBlockCache.class, false);
+
+  @Override
+  public long heapSize() {
+    return getCurrentSize();
+  }
+
+  private static long calculateOverhead(long maxSize, long blockSize, int concurrency) {
+    // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG
+    return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP
+        + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY)
+        + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
+  }
+
+  @Override
+  public Iterator<CachedBlock> iterator() {
+    final Iterator<LruCachedBlock> iterator = map.values().iterator();
+
+    return new Iterator<CachedBlock>() {
+      private final long now = System.nanoTime();
+
+      @Override
+      public boolean hasNext() {
+        return iterator.hasNext();
+      }
+
+      @Override
+      public CachedBlock next() {
+        final LruCachedBlock b = iterator.next();
+        return new CachedBlock() {
+          @Override
+          public String toString() {
+            return BlockCacheUtil.toString(this, now);
+          }
+
+          @Override
+          public BlockPriority getBlockPriority() {
+            return b.getPriority();
+          }
+
+          @Override
+          public BlockType getBlockType() {
+            return b.getBuffer().getBlockType();
+          }
+
+          @Override
+          public long getOffset() {
+            return b.getCacheKey().getOffset();
+          }
+
+          @Override
+          public long getSize() {
+            return b.getBuffer().heapSize();
+          }
+
+          @Override
+          public long getCachedTime() {
+            return b.getCachedTime();
+          }
+
+          @Override
+          public String getFilename() {
+            return b.getCacheKey().getHfileName();
+          }
+
+          @Override
+          public int compareTo(CachedBlock other) {
+            int diff = this.getFilename().compareTo(other.getFilename());
+            if (diff != 0) {
+              return diff;
+            }
+            diff = Long.compare(this.getOffset(), other.getOffset());
+            if (diff != 0) {
+              return diff;
+            }
+            if (other.getCachedTime() < 0 || this.getCachedTime() < 0) {
+              throw new IllegalStateException(this.getCachedTime() + ", " + other.getCachedTime());
+            }
+            return Long.compare(other.getCachedTime(), this.getCachedTime());
+          }
+
+          @Override
+          public int hashCode() {
+            return b.hashCode();
+          }
+
+          @Override
+          public boolean equals(Object obj) {
+            if (obj instanceof CachedBlock) {
+              CachedBlock cb = (CachedBlock) obj;
+              return compareTo(cb) == 0;
+            } else {
+              return false;
+            }
+          }
+        };
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+    };
+  }
+
+  // Simple calculators of sizes given factors and maxSize
+
+  long acceptableSize() {
+    return (long) Math.floor(this.maxSize * this.acceptableFactor);
+  }
+
+  private long minSize() {
+    return (long) Math.floor(this.maxSize * this.minFactor);
+  }
+
+  private long singleSize() {
+    return (long) Math.floor(this.maxSize * this.singleFactor * this.minFactor);
+  }
+
+  private long multiSize() {
+    return (long) Math.floor(this.maxSize * this.multiFactor * this.minFactor);
+  }
+
+  private long memorySize() {
+    return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor);
+  }
+
+  @Override
+  public void shutdown() {
+    if (victimHandler != null) {
+      victimHandler.shutdown();
+    }
+    this.scheduleThreadPool.shutdown();
+    for (int i = 0; i < 10; i++) {
+      if (!this.scheduleThreadPool.isShutdown()) {
+        try {
+          Thread.sleep(10);
+        } catch (InterruptedException e) {
+          LOG.warn("Interrupted while sleeping");
+          Thread.currentThread().interrupt();
+          break;
+        }
+      }
+    }
+
+    if (!this.scheduleThreadPool.isShutdown()) {
+      List<Runnable> runnables = this.scheduleThreadPool.shutdownNow();
+      LOG.debug("Still running " + runnables);
+    }
+    this.evictionThread.shutdown();
+  }
+
+  /** Clears the cache. Used in tests. */
+  public void clearCache() {
+    this.map.clear();
+    this.elements.set(0);
+  }
+
+  /**
+   * Used in testing. May be very inefficient.
+   *
+   * @return the set of cached file names
+   */
+  SortedSet<String> getCachedFileNamesForTest() {
+    SortedSet<String> fileNames = new TreeSet<>();
+    for (BlockCacheKey cacheKey : map.keySet()) {
+      fileNames.add(cacheKey.getHfileName());
+    }
+    return fileNames;
+  }
+
+  public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
+    Map<DataBlockEncoding, Integer> counts = new EnumMap<>(DataBlockEncoding.class);
+    for (LruCachedBlock block : map.values()) {
+      DataBlockEncoding encoding = ((HFileBlock) block.getBuffer()).getDataBlockEncoding();
+      Integer count = counts.get(encoding);
+      counts.put(encoding, (count == null ? 0 : count) + 1);
+    }
+    return counts;
+  }
+
+  Map<BlockCacheKey, LruCachedBlock> getMapForTests() {
+    return map;
+  }
+
+  @Override
+  public BlockCache[] getBlockCaches() {
+    if (victimHandler != null) {
+      return new BlockCache[] { this, this.victimHandler };
+    }
+    return null;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java
index 2b9732092ce9..19725489a975 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java
@@ -145,6 +145,8 @@ private static FirstLevelBlockCache createFirstLevelCache(final Configuration c)
       return new LruBlockCache(cacheSize, blockSize, true, c);
     } else if (policy.equalsIgnoreCase("TinyLFU")) {
       return new TinyLfuBlockCache(cacheSize, blockSize, ForkJoinPool.commonPool(), c);
+    } else if (policy.equalsIgnoreCase("adaptiveLRU")) {
+      return new AdaptiveLruBlockCache(cacheSize, blockSize, true, c);
     } else {
       throw new IllegalArgumentException("Unknown policy: " + policy);
     }
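For reference, the new policy is selected the same way as the existing ones, via the block cache policy setting read by BlockCacheFactory. A minimal sketch of enabling it from client or test code follows; the "hfile.block.cache.policy" key name is an assumption based on the factory's existing policy handling, not something this hunk shows:

    // Sketch only: select the adaptive cache implementation. Any other value
    // falls through to "LRU", "TinyLFU", or the IllegalArgumentException above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hfile.block.cache.policy", "adaptiveLRU"); // assumed key name
    BlockCache cache = BlockCacheFactory.createBlockCache(conf);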
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java
new file mode 100644
index 000000000000..fa2f9afed5c2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java
@@ -0,0 +1,1124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.nio.ByteBuffer;
+import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.hfile.AdaptiveLruBlockCache.EvictionThread;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.junit.Assert;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests the concurrent AdaptiveLruBlockCache.

    + * + * Tests will ensure it grows and shrinks in size properly, + * evictions run when they're supposed to and do what they should, + * and that cached blocks are accessible when expected to be. + */ +@Category({IOTests.class, SmallTests.class}) +public class TestAdaptiveLruBlockCache { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAdaptiveLruBlockCache.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestAdaptiveLruBlockCache.class); + + @Test + public void testCacheEvictionThreadSafe() throws Exception { + long maxSize = 100000; + int numBlocks = 9; + int testRuns = 10; + final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); + assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + + final Configuration conf = HBaseConfiguration.create(); + final AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize); + EvictionThread evictionThread = cache.getEvictionThread(); + assertTrue(evictionThread != null); + while (!evictionThread.isEnteringRun()) { + Thread.sleep(1); + } + final String hfileName = "hfile"; + int threads = 10; + final int blocksPerThread = 5 * numBlocks; + for (int run = 0; run != testRuns; ++run) { + final AtomicInteger blockCount = new AtomicInteger(0); + ExecutorService service = Executors.newFixedThreadPool(threads); + for (int i = 0; i != threads; ++i) { + service.execute(new Runnable() { + @Override + public void run() { + for (int blockIndex = 0; blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) { + CachedItem block = new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement()); + boolean inMemory = Math.random() > 0.5; + cache.cacheBlock(block.cacheKey, block, inMemory); + } + cache.evictBlocksByHfileName(hfileName); + } + }); + } + service.shutdown(); + // The test may fail here if the evict thread frees the blocks too fast + service.awaitTermination(10, TimeUnit.MINUTES); + Waiter.waitFor(conf, 10000, 100, new ExplainingPredicate() { + @Override + public boolean evaluate() throws Exception { + return cache.getBlockCount() == 0; + } + + @Override + public String explainFailure() throws Exception { + return "Cache block count failed to return to 0"; + } + }); + assertEquals(0, cache.getBlockCount()); + assertEquals(cache.getOverhead(), cache.getCurrentSize()); + } + } + @Test + public void testBackgroundEvictionThread() throws Exception { + long maxSize = 100000; + int numBlocks = 9; + long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); + assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize,blockSize); + EvictionThread evictionThread = cache.getEvictionThread(); + assertTrue(evictionThread != null); + + CachedItem[] blocks = generateFixedBlocks(numBlocks + 1, blockSize, "block"); + + // Make sure eviction thread has entered run method + while (!evictionThread.isEnteringRun()) { + Thread.sleep(1); + } + + // Add all the blocks + for (CachedItem block : blocks) { + cache.cacheBlock(block.cacheKey, block); + } + + // wait until at least one eviction has run + int n = 0; + while(cache.getStats().getEvictionCount() == 0) { + Thread.sleep(200); + assertTrue("Eviction never happened.", n++ < 20); + } + + // let cache stabilize + // On some systems, the cache will run multiple evictions before it attains + // steady-state. 
For instance, after populating the cache with 10 blocks, + // the first eviction evicts a single block and then a second eviction + // evicts another. I think this is due to the delta between minSize and + // acceptableSize, combined with variance between object overhead on + // different environments. + n = 0; + for (long prevCnt = 0 /* < number of blocks added */, + curCnt = cache.getBlockCount(); + prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { + Thread.sleep(200); + assertTrue("Cache never stabilized.", n++ < 20); + } + + long evictionCount = cache.getStats().getEvictionCount(); + assertTrue(evictionCount >= 1); + System.out.println("Background Evictions run: " + evictionCount); + } + + @Test + public void testCacheSimple() throws Exception { + + long maxSize = 1000000; + long blockSize = calculateBlockSizeDefault(maxSize, 101); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize); + + CachedItem [] blocks = generateRandomBlocks(100, blockSize); + + long expectedCacheSize = cache.heapSize(); + + // Confirm empty + for (CachedItem block : blocks) { + assertTrue(cache.getBlock(block.cacheKey, true, false, true) == null); + } + + // Add blocks + for (CachedItem block : blocks) { + cache.cacheBlock(block.cacheKey, block); + expectedCacheSize += block.cacheBlockHeapSize(); + } + + // Verify correctly calculated cache heap size + assertEquals(expectedCacheSize, cache.heapSize()); + + // Check if all blocks are properly cached and retrieved + for (CachedItem block : blocks) { + HeapSize buf = cache.getBlock(block.cacheKey, true, false, true); + assertTrue(buf != null); + assertEquals(buf.heapSize(), block.heapSize()); + } + + // Re-add same blocks and ensure nothing has changed + long expectedBlockCount = cache.getBlockCount(); + for (CachedItem block : blocks) { + cache.cacheBlock(block.cacheKey, block); + } + assertEquals( + "Cache should ignore cache requests for blocks already in cache", + expectedBlockCount, cache.getBlockCount()); + + // Verify correctly calculated cache heap size + assertEquals(expectedCacheSize, cache.heapSize()); + + // Check if all blocks are properly cached and retrieved + for (CachedItem block : blocks) { + HeapSize buf = cache.getBlock(block.cacheKey, true, false, true); + assertTrue(buf != null); + assertEquals(buf.heapSize(), block.heapSize()); + } + + // Expect no evictions + assertEquals(0, cache.getStats().getEvictionCount()); + Thread t = new AdaptiveLruBlockCache.StatisticsThread(cache); + t.start(); + t.join(); + } + + @Test + public void testCacheEvictionSimple() throws Exception { + + long maxSize = 100000; + long blockSize = calculateBlockSizeDefault(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize,blockSize,false); + + CachedItem [] blocks = generateFixedBlocks(10, blockSize, "block"); + + long expectedCacheSize = cache.heapSize(); + + // Add all the blocks + for (CachedItem block : blocks) { + cache.cacheBlock(block.cacheKey, block); + expectedCacheSize += block.cacheBlockHeapSize(); + } + + // A single eviction run should have occurred + assertEquals(1, cache.getStats().getEvictionCount()); + + // Our expected size overruns acceptable limit + assertTrue(expectedCacheSize > + (maxSize * AdaptiveLruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + + // But the cache did not grow beyond max + assertTrue(cache.heapSize() < maxSize); + + // And is still below the acceptable limit + assertTrue(cache.heapSize() < + (maxSize * AdaptiveLruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + + 
// All blocks except block 0 should be in the cache + assertTrue(cache.getBlock(blocks[0].cacheKey, true, false, true) == null); + for(int i=1;i + (maxSize * AdaptiveLruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + + // But the cache did not grow beyond max + assertTrue(cache.heapSize() <= maxSize); + + // And is now below the acceptable limit + assertTrue(cache.heapSize() <= + (maxSize * AdaptiveLruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + + // We expect fairness across the two priorities. + // This test makes multi go barely over its limit, in-memory + // empty, and the rest in single. Two single evictions and + // one multi eviction expected. + assertTrue(cache.getBlock(singleBlocks[0].cacheKey, true, false, true) == null); + assertTrue(cache.getBlock(multiBlocks[0].cacheKey, true, false, true) == null); + + // And all others to be cached + for(int i=1;i<4;i++) { + assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true, false, true), + singleBlocks[i]); + assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true, false, true), + multiBlocks[i]); + } + } + + @Test + public void testCacheEvictionThreePriorities() throws Exception { + + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.98f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 16 * 1024 * 1024, + 10, + 500, + 0.01f); + + CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); + CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); + CachedItem [] memoryBlocks = generateFixedBlocks(5, blockSize, "memory"); + + long expectedCacheSize = cache.heapSize(); + + // Add 3 blocks from each priority + for(int i=0;i<3;i++) { + + // Just add single blocks + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + expectedCacheSize += singleBlocks[i].cacheBlockHeapSize(); + + // Add and get multi blocks + cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]); + expectedCacheSize += multiBlocks[i].cacheBlockHeapSize(); + cache.getBlock(multiBlocks[i].cacheKey, true, false, true); + + // Add memory blocks as such + cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true); + expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize(); + + } + + // Do not expect any evictions yet + assertEquals(0, cache.getStats().getEvictionCount()); + + // Verify cache size + assertEquals(expectedCacheSize, cache.heapSize()); + + // Insert a single block, oldest single should be evicted + cache.cacheBlock(singleBlocks[3].cacheKey, singleBlocks[3]); + + // Single eviction, one thing evicted + assertEquals(1, cache.getStats().getEvictionCount()); + assertEquals(1, cache.getStats().getEvictedCount()); + + // Verify oldest single block is the one evicted + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); + + // Change the oldest remaining single block to a multi + cache.getBlock(singleBlocks[1].cacheKey, true, false, true); + + // Insert another single block + cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]); + + // Two evictions, two evicted. 
+ assertEquals(2, cache.getStats().getEvictionCount()); + assertEquals(2, cache.getStats().getEvictedCount()); + + // Oldest multi block should be evicted now + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); + + // Insert another memory block + cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); + + // Three evictions, three evicted. + assertEquals(3, cache.getStats().getEvictionCount()); + assertEquals(3, cache.getStats().getEvictedCount()); + + // Oldest memory block should be evicted now + assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true)); + + // Add a block that is twice as big (should force two evictions) + CachedItem [] bigBlocks = generateFixedBlocks(3, blockSize*3, "big"); + cache.cacheBlock(bigBlocks[0].cacheKey, bigBlocks[0]); + + // Four evictions, six evicted (inserted block 3X size, expect +3 evicted) + assertEquals(4, cache.getStats().getEvictionCount()); + assertEquals(6, cache.getStats().getEvictedCount()); + + // Expect three remaining singles to be evicted + assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true)); + + // Make the big block a multi block + cache.getBlock(bigBlocks[0].cacheKey, true, false, true); + + // Cache another single big block + cache.cacheBlock(bigBlocks[1].cacheKey, bigBlocks[1]); + + // Five evictions, nine evicted (3 new) + assertEquals(5, cache.getStats().getEvictionCount()); + assertEquals(9, cache.getStats().getEvictedCount()); + + // Expect three remaining multis to be evicted + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true)); + + // Cache a big memory block + cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true); + + // Six evictions, twelve evicted (3 new) + assertEquals(6, cache.getStats().getEvictionCount()); + assertEquals(12, cache.getStats().getEvictedCount()); + + // Expect three remaining in-memory to be evicted + assertEquals(null, cache.getBlock(memoryBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[3].cacheKey, true, false, true)); + } + + @Test + public void testCacheEvictionInMemoryForceMode() throws Exception { + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.98f, // min + 0.99f, // acceptable + 0.2f, // single + 0.3f, // multi + 0.5f, // memory + 1.2f, // limit + true, + 16 * 1024 * 1024, + 10, + 500, + 0.01f); + + CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); + CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); + CachedItem [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); + + long expectedCacheSize = cache.heapSize(); + + // 0. 
Add 5 single blocks and 4 multi blocks to make cache full, si:mu:me = 5:4:0 + for(int i = 0; i < 4; i++) { + // Just add single blocks + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + expectedCacheSize += singleBlocks[i].cacheBlockHeapSize(); + // Add and get multi blocks + cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]); + expectedCacheSize += multiBlocks[i].cacheBlockHeapSize(); + cache.getBlock(multiBlocks[i].cacheKey, true, false, true); + } + // 5th single block + cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]); + expectedCacheSize += singleBlocks[4].cacheBlockHeapSize(); + // Do not expect any evictions yet + assertEquals(0, cache.getStats().getEvictionCount()); + // Verify cache size + assertEquals(expectedCacheSize, cache.heapSize()); + + // 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1 + cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true); + // Single eviction, one block evicted + assertEquals(1, cache.getStats().getEvictionCount()); + assertEquals(1, cache.getStats().getEvictedCount()); + // Verify oldest single block (index = 0) is the one evicted + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); + + // 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2 + cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true); + // Two evictions, two evicted. + assertEquals(2, cache.getStats().getEvictionCount()); + assertEquals(2, cache.getStats().getEvictedCount()); + // Current oldest single block (index = 1) should be evicted now + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); + + // 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6 + cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true); + cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); + cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true); + cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true); + // Three evictions, three evicted. + assertEquals(6, cache.getStats().getEvictionCount()); + assertEquals(6, cache.getStats().getEvictedCount()); + // two oldest single blocks and two oldest multi blocks evicted + assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); + + // 4. Insert 3 memory blocks, the remaining 1 single and 2 multi evicted + // si:mu:me = 0:0:9 + cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true); + cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true); + cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true); + // Three evictions, three evicted. + assertEquals(9, cache.getStats().getEvictionCount()); + assertEquals(9, cache.getStats().getEvictedCount()); + // one oldest single block and two oldest multi blocks evicted + assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[3].cacheKey, true, false, true)); + + // 5. Insert one memory block, the oldest memory evicted + // si:mu:me = 0:0:9 + cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true); + // one eviction, one evicted. 
+ assertEquals(10, cache.getStats().getEvictionCount()); + assertEquals(10, cache.getStats().getEvictedCount()); + // oldest memory block evicted + assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true)); + + // 6. Insert one new single block, itself evicted immediately since + // all blocks in cache are memory-type which have higher priority + // si:mu:me = 0:0:9 (no change) + cache.cacheBlock(singleBlocks[9].cacheKey, singleBlocks[9]); + // one eviction, one evicted. + assertEquals(11, cache.getStats().getEvictionCount()); + assertEquals(11, cache.getStats().getEvictedCount()); + // the single block just cached now evicted (can't evict memory) + assertEquals(null, cache.getBlock(singleBlocks[9].cacheKey, true, false, true)); + } + + // test scan resistance + @Test + public void testScanResistance() throws Exception { + + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 16 * 1024 * 1024, + 10, + 500, + 0.01f); + + CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); + CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); + + // Add 5 multi blocks + for (CachedItem block : multiBlocks) { + cache.cacheBlock(block.cacheKey, block); + cache.getBlock(block.cacheKey, true, false, true); + } + + // Add 5 single blocks + for(int i=0;i<5;i++) { + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + } + + // An eviction ran + assertEquals(1, cache.getStats().getEvictionCount()); + + // To drop down to 2/3 capacity, we'll need to evict 4 blocks + assertEquals(4, cache.getStats().getEvictedCount()); + + // Should have been taken off equally from single and multi + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); + + // Let's keep "scanning" by adding single blocks. From here on we only + // expect evictions from the single bucket. + + // Every time we reach 10 total blocks (every 4 inserts) we get 4 single + // blocks evicted. Inserting 13 blocks should yield 3 more evictions and + // 12 more evicted. 
+ + for(int i=5;i<18;i++) { + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + } + + // 4 total evictions, 16 total evicted + assertEquals(4, cache.getStats().getEvictionCount()); + assertEquals(16, cache.getStats().getEvictedCount()); + + // Should now have 7 total blocks + assertEquals(7, cache.getBlockCount()); + + } + + @Test + public void testMaxBlockSize() throws Exception { + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 1024, + 10, + 500, + 0.01f); + + CachedItem [] tooLong = generateFixedBlocks(10, 1024+5, "long"); + CachedItem [] small = generateFixedBlocks(15, 600, "small"); + + + for (CachedItem i:tooLong) { + cache.cacheBlock(i.cacheKey, i); + } + for (CachedItem i:small) { + cache.cacheBlock(i.cacheKey, i); + } + assertEquals(15,cache.getBlockCount()); + for (CachedItem i:small) { + assertNotNull(cache.getBlock(i.cacheKey, true, false, false)); + } + for (CachedItem i:tooLong) { + assertNull(cache.getBlock(i.cacheKey, true, false, false)); + } + + assertEquals(10, cache.getStats().getFailedInserts()); + } + + // test setMaxSize + @Test + public void testResizeBlockCache() throws Exception { + + long maxSize = 300000; + long blockSize = calculateBlockSize(maxSize, 31); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.98f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 16 * 1024 * 1024, + 10, + 500, + 0.01f); + + CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); + CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); + CachedItem [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); + + // Add all blocks from all priorities + for(int i=0;i<10;i++) { + + // Just add single blocks + cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); + + // Add and get multi blocks + cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]); + cache.getBlock(multiBlocks[i].cacheKey, true, false, true); + + // Add memory blocks as such + cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true); + } + + // Do not expect any evictions yet + assertEquals(0, cache.getStats().getEvictionCount()); + + // Resize to half capacity plus an extra block (otherwise we evict an extra) + cache.setMaxSize((long)(maxSize * 0.5f)); + + // Should have run a single eviction + assertEquals(1, cache.getStats().getEvictionCount()); + + // And we expect 1/2 of the blocks to be evicted + assertEquals(15, cache.getStats().getEvictedCount()); + + // And the oldest 5 blocks from each category should be gone + for(int i=0;i<5;i++) { + assertEquals(null, cache.getBlock(singleBlocks[i].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[i].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[i].cacheKey, true, false, true)); + } + + // And the newest 5 blocks should still be accessible + for(int i=5;i<10;i++) { + assertEquals(singleBlocks[i], 
cache.getBlock(singleBlocks[i].cacheKey, true, false, true)); + assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].cacheKey, true, false, true)); + assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].cacheKey, true, false, true)); + } + } + + // test metricsPastNPeriods + @Test + public void testPastNPeriodsMetrics() throws Exception { + double delta = 0.01; + + // 3 total periods + CacheStats stats = new CacheStats("test", 3); + + // No accesses, should be 0 + stats.rollMetricsPeriod(); + assertEquals(0.0, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 1, 1 hit caching, 1 hit non-caching, 2 miss non-caching + // should be (2/4)=0.5 and (1/1)=1 + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); + assertEquals(1.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 2, 1 miss caching, 3 miss non-caching + // should be (2/8)=0.25 and (1/2)=0.5 + stats.miss(true, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.25, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 3, 2 hits of each type + // should be (6/12)=0.5 and (3/4)=0.75 + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.75, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 4, evict period 1, two caching misses + // should be (4/10)=0.4 and (2/5)=0.4 + stats.miss(true, false, BlockType.DATA); + stats.miss(true, false, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.4, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.4, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 5, evict period 2, 2 caching misses, 2 non-caching hit + // should be (6/10)=0.6 and (2/6)=1/3 + stats.miss(true, false, BlockType.DATA); + stats.miss(true, false, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.6, stats.getHitRatioPastNPeriods(), delta); + assertEquals((double)1/3, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 6, evict period 3 + // should be (2/6)=1/3 and (0/4)=0 + stats.rollMetricsPeriod(); + assertEquals((double)1/3, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 7, evict period 4 + // should be (2/4)=0.5 and (0/2)=0 + stats.rollMetricsPeriod(); + assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 8, evict period 5 + // should be 0 and 0 + stats.rollMetricsPeriod(); + assertEquals(0.0, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); + + // period 9, one of each + // should be (2/4)=0.5 and (1/2)=0.5 + stats.miss(true, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.hit(true, true, 
BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.rollMetricsPeriod(); + assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); + assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta); + } + + @Test + public void testCacheBlockNextBlockMetadataMissing() { + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + int size = 100; + int length = HConstants.HFILEBLOCK_HEADER_SIZE + size; + byte[] byteArr = new byte[length]; + ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size); + HFileContext meta = new HFileContextBuilder().build(); + HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, + ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, -1, meta, HEAP); + HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, + ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, HEAP); + + AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize, false, + (int)Math.ceil(1.2*maxSize/blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, + AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + 1024, + 10, + 500, + 0.01f); + + BlockCacheKey key = new BlockCacheKey("key1", 0); + ByteBuffer actualBuffer = ByteBuffer.allocate(length); + ByteBuffer block1Buffer = ByteBuffer.allocate(length); + ByteBuffer block2Buffer = ByteBuffer.allocate(length); + blockWithNextBlockMetadata.serialize(block1Buffer, true); + blockWithoutNextBlockMetadata.serialize(block2Buffer, true); + + //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back. + CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, + block1Buffer); + + //Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back. + CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, + block1Buffer); + + //Clear and add blockWithoutNextBlockMetadata + cache.clearCache(); + assertNull(cache.getBlock(key, false, false, false)); + CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, + block2Buffer); + + //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace. 
+ CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, + block1Buffer); + } + + private CachedItem [] generateFixedBlocks(int numBlocks, int size, String pfx) { + CachedItem [] blocks = new CachedItem[numBlocks]; + for(int i=0;i getDeserializer() { + return null; + } + + @Override + public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) { + } + + @Override + public BlockType getBlockType() { + return BlockType.DATA; + } + } + + static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exception { + int size = 100; + int length = HConstants.HFILEBLOCK_HEADER_SIZE + size; + byte[] byteArr = new byte[length]; + HFileContext meta = new HFileContextBuilder().build(); + BlockCacheKey key = new BlockCacheKey("key1", 0); + HFileBlock blk = new HFileBlock(BlockType.DATA, size, size, -1, + ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, 52, -1, meta, + HEAP); + AtomicBoolean err1 = new AtomicBoolean(false); + Thread t1 = new Thread(() -> { + for (int i = 0; i < 10000 && !err1.get(); i++) { + try { + cache.getBlock(key, false, false, true); + } catch (Exception e) { + err1.set(true); + LOG.info("Cache block or get block failure: ", e); + } + } + }); + + AtomicBoolean err2 = new AtomicBoolean(false); + Thread t2 = new Thread(() -> { + for (int i = 0; i < 10000 && !err2.get(); i++) { + try { + cache.evictBlock(key); + } catch (Exception e) { + err2.set(true); + LOG.info("Evict block failure: ", e); + } + } + }); + + AtomicBoolean err3 = new AtomicBoolean(false); + Thread t3 = new Thread(() -> { + for (int i = 0; i < 10000 && !err3.get(); i++) { + try { + cache.cacheBlock(key, blk); + } catch (Exception e) { + err3.set(true); + LOG.info("Cache block failure: ", e); + } + } + }); + t1.start(); + t2.start(); + t3.start(); + t1.join(); + t2.join(); + t3.join(); + Assert.assertFalse(err1.get()); + Assert.assertFalse(err2.get()); + Assert.assertFalse(err3.get()); + } + + @Test + public void testMultiThreadGetAndEvictBlock() throws Exception { + long maxSize = 100000; + long blockSize = calculateBlockSize(maxSize, 10); + AdaptiveLruBlockCache cache = + new AdaptiveLruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 1024, + 10, + 500, + 0.01f); + testMultiThreadGetAndEvictBlockInternal(cache); + } + + public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { + long maxSize = 100000000; + int numBlocks = 100000; + final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); + assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + + final AdaptiveLruBlockCache cache = + new AdaptiveLruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, + 0.5f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, + maxSize, + heavyEvictionCountLimit, + 200, + 0.01f); + + EvictionThread evictionThread = cache.getEvictionThread(); + assertTrue(evictionThread != null); + while (!evictionThread.isEnteringRun()) { + Thread.sleep(1); + } + + final String hfileName = "hfile"; + for (int blockIndex = 0; blockIndex <= numBlocks * 
3000; ++blockIndex) { + CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); + cache.cacheBlock(block.cacheKey, block, false); + if (cache.getCacheDataBlockPercent() < 70) { + // enough for the test + break; + } + } + + evictionThread.evict(); + Thread.sleep(100); + + if (heavyEvictionCountLimit == 0) { + // Check that the offset (last two digits) of every cached block is below the percent. + // It means some blocks were never put into the BlockCache + assertTrue(cache.getCacheDataBlockPercent() < 90); + for (BlockCacheKey key : cache.getMapForTests().keySet()) { + assertTrue(!(key.getOffset() % 100 > 90)); + } + } else { + // Check that auto-scaling is not working (all blocks are in the BlockCache) + assertTrue(cache.getCacheDataBlockPercent() == 100); + int counter = 0; + for (BlockCacheKey key : cache.getMapForTests().keySet()) { + if (key.getOffset() % 100 > 90) { + counter++; + } + } + assertTrue(counter > 1000); + } + evictionThread.shutdown(); + } + + @Test + public void testSkipCacheDataBlocks() throws Exception { + // Check that auto-scaling will work right after start + testSkipCacheDataBlocksInteral(0); + // Check that auto-scaling will not work right after start + // (the test has to finish before auto-scaling kicks in) + testSkipCacheDataBlocksInteral(100); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index b30575ebc55c..afaf85f5b2a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -61,7 +61,7 @@ public class TestLruBlockCache { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLruBlockCache.class); + HBaseClassTestRule.forClass(TestLruBlockCache.class); private static final Logger LOG = LoggerFactory.getLogger(TestLruBlockCache.class); @@ -156,8 +156,8 @@ public void testBackgroundEvictionThread() throws Exception { // different environments.
n = 0; for (long prevCnt = 0 /* < number of blocks added */, - curCnt = cache.getBlockCount(); - prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { + curCnt = cache.getBlockCount(); + prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { Thread.sleep(200); assertTrue("Cache never stabilized.", n++ < 20); } @@ -206,8 +206,8 @@ public void testCacheSimple() throws Exception { cache.cacheBlock(block.cacheKey, block); } assertEquals( - "Cache should ignore cache requests for blocks already in cache", - expectedBlockCount, cache.getBlockCount()); + "Cache should ignore cache requests for blocks already in cache", + expectedBlockCount, cache.getBlockCount()); // Verify correctly calculated cache heap size assertEquals(expectedCacheSize, cache.heapSize()); @@ -256,13 +256,13 @@ public void testCacheEvictionSimple() throws Exception { // And is still below the acceptable limit assertTrue(cache.heapSize() < - (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); // All blocks except block 0 should be in the cache assertTrue(cache.getBlock(blocks[0].cacheKey, true, false, true) == null); for(int i=1;i { for (int i = 0; i < 10000 && !err1.get(); i++) { @@ -1037,89 +1018,15 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); LruBlockCache cache = - new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, 1024, - 10, - 500, - 0.01f); - testMultiThreadGetAndEvictBlockInternal(cache); - } - - public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception { - long maxSize = 100000000; - int numBlocks = 100000; - final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); - assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); - - final LruBlockCache cache = - new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.5f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - maxSize, - heavyEvictionCountLimit, - 200, - 0.01f); - - EvictionThread evictionThread = cache.getEvictionThread(); - assertTrue(evictionThread != null); - while (!evictionThread.isEnteringRun()) { - Thread.sleep(1); - } - - final String hfileName = "hfile"; - for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) { - CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex); - cache.cacheBlock(block.cacheKey, block, false); - if (cache.getCacheDataBlockPercent() < 70) { - // enough for test - break; - } - } - - evictionThread.evict(); - Thread.sleep(100); - - if (heavyEvictionCountLimit == 0) { - // Check if all offset (last two digits) of cached blocks less than the percent. 
- // It means some of blocks haven't put into BlockCache - assertTrue(cache.getCacheDataBlockPercent() < 90); - for (BlockCacheKey key : cache.getMapForTests().keySet()) { - assertTrue(!(key.getOffset() % 100 > 90)); - } - } else { - // Check that auto-scaling is not working (all blocks in BlockCache) - assertTrue(cache.getCacheDataBlockPercent() == 100); - int counter = 0; - for (BlockCacheKey key : cache.getMapForTests().keySet()) { - if (key.getOffset() % 100 > 90) { - counter++; - } - } - assertTrue(counter > 1000); - } - evictionThread.shutdown(); - } - - @Test - public void testSkipCacheDataBlocks() throws Exception { - // Check that auto-scaling will work right after start - testSkipCacheDataBlocksInteral(0); - // Check that auto-scaling will not work right after start - // (have to finished before auto-scaling) - testSkipCacheDataBlocksInteral(100); + 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 1024); + testMultiThreadGetAndEvictBlockInternal(cache); } - } From b140179994487f463eb823b3122dc6c4e8bb5129 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:12:05 +0300 Subject: [PATCH 727/769] Added AdaptiveLruBlockCache --- .../apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index a57464bb1d6d..a72e86b132a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -134,8 +134,8 @@ * can adjust to it and set the coefficient to lower value. * For example, we set the coefficient = 0.01. It means the overhead (see above) will be * multiplied by 0.01 and the result is the value of reducing percent caching blocks. For example, - * if the overhead = 300% and the coefficient = 0.01, - * then percent of caching blocks will reduce by 3%. + * if the overhead = 300% and the coefficient = 0.01, * then percent of caching blocks will + * reduce by 3%. * Similar logic when overhead has got negative value (overshooting). Maybe it is just short-term * fluctuation and we will try to stay in this mode. It helps avoid premature exit during * short-term fluctuation. Backpressure has simple logic: more overshooting - more caching blocks. 
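The arithmetic in the javadoc hunk above is worth making concrete. A minimal sketch of the reduction step it describes, using illustrative names (the method and parameters here are not the actual AdaptiveLruBlockCache members):

  // Sketch only (HBASE-23887), not the committed code.
  // overheadPercent is how far eviction work exceeded the freeable limit, in percent;
  // overheadPercent = 300 with coefficient = 0.01f cuts DATA-block caching by 3 points.
  final class CacheDataPercentMath {
    static int reducedPercent(int currentPercent, long overheadPercent, float coefficient) {
      int delta = Math.round(overheadPercent * coefficient); // round(300 * 0.01) = 3
      return Math.max(0, currentPercent - delta);            // never drop below 0 percent
    }

    public static void main(String[] args) {
      // The javadoc's example: overhead 300%, coefficient 0.01 -> 100% becomes 97%
      System.out.println(reducedPercent(100, 300, 0.01f));
    }
  }

So a cache currently admitting 100% of DATA blocks would move to 97% for the numbers used in the javadoc.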
From fffb3a3ff69c07334bb50c0a4fa5a5fa803dab39 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 25 Nov 2020 14:10:12 -0800 Subject: [PATCH 728/769] HBASE-24640 Purge use of VisibleForTesting (#2695) Signed-off-by: Reid Chan Signed-off-by: Nick Dimiduk --- .../java/org/apache/hadoop/hbase/master/SplitWALManager.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index 6db094c4e6df..e72b607a7f8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WALSplitUtil; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; From c8ffb93b1a025c8335c951958607681deedcc759 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:06:52 +0300 Subject: [PATCH 729/769] Added AdaptiveLruBlockCache --- .../apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index a72e86b132a4..88f27f12d703 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -134,8 +134,13 @@ * can adjust to it and set the coefficient to lower value. * For example, we set the coefficient = 0.01. It means the overhead (see above) will be * multiplied by 0.01 and the result is the value of reducing percent caching blocks. For example, +<<<<<<< HEAD * if the overhead = 300% and the coefficient = 0.01, * then percent of caching blocks will * reduce by 3%. +======= + * if the overhead = 300% and the coefficient = 0.01, + * then percent of caching blocks will reduce by 3%. +>>>>>>> Added AdaptiveLruBlockCache * Similar logic when overhead has got negative value (overshooting). Maybe it is just short-term * fluctuation and we will try to stay in this mode. It helps avoid premature exit during * short-term fluctuation. Backpressure has simple logic: more overshooting - more caching blocks. 
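The percent being tuned by this logic is enforced by sampling on block offsets, which is why the tests earlier in this series assert on the last two digits of each cached key's offset. A minimal sketch of that admission check, assuming a shouldCacheDataBlock helper (an illustrative name, not the committed method):

  // Sketch only: admit roughly cacheDataBlockPercent% of DATA blocks.
  // File offsets are effectively uniform modulo 100, so this passes about
  // cacheDataBlockPercent blocks out of every hundred.
  static boolean shouldCacheDataBlock(long offset, int cacheDataBlockPercent) {
    return offset % 100 < cacheDataBlockPercent;
  }

This is the invariant testSkipCacheDataBlocks checks when it asserts that no cached key has getOffset() % 100 > 90 once the percent has auto-scaled below 90.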
From 134bec68fd6767a08c75d1c6291174a05a494d79 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:20:51 +0300 Subject: [PATCH 730/769] Added AdaptiveLruBlockCache + rebase --- .../hbase/io/hfile/AdaptiveLruBlockCache.java | 38 +++++++++++-------- .../io/hfile/TestAdaptiveLruBlockCache.java | 12 ++++-- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index 88f27f12d703..8318d998ce59 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -327,7 +327,7 @@ public class AdaptiveLruBlockCache implements FirstLevelBlockCache { * @param blockSize approximate size of each block, in bytes */ public AdaptiveLruBlockCache(long maxSize, long blockSize) { - this(maxSize, blockSize, true); + this(maxSize, blockSize,true); } /** @@ -349,7 +349,8 @@ public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThrea DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); } - public AdaptiveLruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { + public AdaptiveLruBlockCache(long maxSize, long blockSize, + boolean evictionThread, Configuration conf) { this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), DEFAULT_LOAD_FACTOR, @@ -470,17 +471,17 @@ public int getCacheDataBlockPercent() { } /** - * The block cached in AdaptiveLruBlockCache will always be an heap block: on the one side, the heap - * access will be more faster then off-heap, the small index block or meta block cached in - * CombinedBlockCache will benefit a lot. on other side, the AdaptiveLruBlockCache size is always - * calculated based on the total heap size, if caching an off-heap block in AdaptiveLruBlockCache, the - * heap size will be messed up. Here we will clone the block into an heap block if it's an - * off-heap block, otherwise just use the original block. The key point is maintain the refCnt of - * the block (HBASE-22127):
    + * The block cached in AdaptiveLruBlockCache will always be an heap block: on the one side, + * the heap access will be more faster then off-heap, the small index block or meta block + * cached in CombinedBlockCache will benefit a lot. on other side, the AdaptiveLruBlockCache size + * is always * calculated based on the total heap size, if caching an off-heap block in + * AdaptiveLruBlockCache, the heap size will be messed up. Here we will clone the block into an + * heap block if it's an off-heap block, otherwise just use the original block. The key point is + * maintain the refCnt of the block (HBASE-22127):
    * 1. if cache the cloned heap block, its refCnt is an totally new one, it's easy to handle;
    * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's - * reservoir, if both RPC and AdaptiveLruBlockCache release the block, then it can be garbage collected by - * JVM, so need a retain here. + * reservoir, if both RPC and AdaptiveLruBlockCache release the block, then it can be garbage + * collected by JVM, so need a retain here. * @param buf the original block * @return an block with an heap memory backend. */ @@ -491,7 +492,8 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { return HFileBlock.deepCloneOnHeap(blk); } } - // The block will be referenced by this AdaptiveLruBlockCache, so should increase its refCnt here. + // The block will be referenced by this AdaptiveLruBlockCache, + // so should increase its refCnt here. return buf.retain(); } @@ -537,7 +539,8 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) } LruCachedBlock cb = map.get(cacheKey); - if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, buf)) { + if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, + buf)) { return; } long currentSize = size.get(); @@ -806,9 +809,12 @@ long evict() { } // Instantiate priority buckets - BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); - BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); - BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); + BlockBucket bucketSingle + = new BlockBucket("single", bytesToFree, blockSize, singleSize()); + BlockBucket bucketMulti + = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); + BlockBucket bucketMemory + = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); // Scan entire map putting into appropriate buckets for (LruCachedBlock cachedBlock : map.values()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java index fa2f9afed5c2..fb14a050dfbd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java @@ -984,7 +984,8 @@ static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exc HFileContext meta = new HFileContextBuilder().build(); BlockCacheKey key = new BlockCacheKey("key1", 0); HFileBlock blk = new HFileBlock(BlockType.DATA, size, size, -1, - ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, 52, -1, meta, + ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, + 52, -1, meta, HEAP); AtomicBoolean err1 = new AtomicBoolean(false); Thread t1 = new Thread(() -> { @@ -1037,7 +1038,8 @@ public void testMultiThreadGetAndEvictBlock() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); AdaptiveLruBlockCache cache = - new AdaptiveLruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + new AdaptiveLruBlockCache(maxSize, blockSize, false, ( + int) Math.ceil(1.2 * maxSize / blockSize), AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min 0.99f, // acceptable @@ -1056,10 +1058,12 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E long maxSize = 100000000; int 
numBlocks = 100000; final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); - assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + assertTrue("calculateBlockSize appears broken.", + blockSize * numBlocks <= maxSize); final AdaptiveLruBlockCache cache = - new AdaptiveLruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize), + new AdaptiveLruBlockCache(maxSize, blockSize, true, + (int) Math.ceil(1.2 * maxSize / blockSize), AdaptiveLruBlockCache.DEFAULT_LOAD_FACTOR, AdaptiveLruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min 0.99f, // acceptable From 80002aa11479640fc8f30713d20a6ac617997439 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:23:26 +0300 Subject: [PATCH 731/769] Added AdaptiveLruBlockCache + rebase --- .../hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java index fb14a050dfbd..ab2e566de9f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestAdaptiveLruBlockCache.java @@ -71,7 +71,8 @@ public void testCacheEvictionThreadSafe() throws Exception { int numBlocks = 9; int testRuns = 10; final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); - assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + assertTrue("calculateBlockSize appears broken.", + blockSize * numBlocks <= maxSize); final Configuration conf = HBaseConfiguration.create(); final AdaptiveLruBlockCache cache = new AdaptiveLruBlockCache(maxSize, blockSize); From cd42cd245cedf3bb22c1fcadb97fc1a8fe4d0c08 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:28:15 +0300 Subject: [PATCH 732/769] Added AdaptiveLruBlockCache + rebase --- .../apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index 8318d998ce59..dab90f96af19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -48,7 +48,8 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * This realisation improve performance of classical LRU cache up to 3 times via reduce GC job. + * This realisation improve performance of classical LRU + * cache up to 3 times via reduce GC job. *
* <p>
    * The classical block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a From 76d36d6a5a728487ad925745f911d27a3c751744 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:28:54 +0300 Subject: [PATCH 733/769] Added AdaptiveLruBlockCache + rebase --- .../hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index dab90f96af19..43034b6e891f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -51,10 +51,10 @@ * This realisation improve performance of classical LRU * cache up to 3 times via reduce GC job. *
* <p>
    - * The classical block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an - * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a - * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} - * operations. + * The classical block cache implementation that is memory-aware using {@link HeapSize}, + * memory-bound using an LRU eviction algorithm, and concurrent: backed by + * a {@link ConcurrentHashMap} and with a non-blocking eviction thread giving + * constant-time {@link #cacheBlock} and {@link #getBlock} operations. *
* <p>
    * Contains three levels of block priority to allow for scan-resistance and in-memory families * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An From 011c70a76d30cb911f25a59567f5c37753371523 Mon Sep 17 00:00:00 2001 From: pustota2009 Date: Sun, 7 Feb 2021 18:33:30 +0300 Subject: [PATCH 734/769] Added AdaptiveLruBlockCache + rebase --- .../org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java index 43034b6e891f..29b02dcde708 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java @@ -147,6 +147,7 @@ * short-term fluctuation. Backpressure has simple logic: more overshooting - more caching blocks. *
* <p>
    * Find more information about improvement: https://issues.apache.org/jira/browse/HBASE-23887 + * */ @InterfaceAudience.Private public class AdaptiveLruBlockCache implements FirstLevelBlockCache { From 6a8c9cb4808a7dad3d4408a5063188f4ec862a30 Mon Sep 17 00:00:00 2001 From: stack Date: Tue, 22 Sep 2020 20:48:31 -0700 Subject: [PATCH 735/769] Revert "HBASE-25068 Pass WALFactory to Replication so it knows of all WALProviders, not just default/user-space" This reverts commit 17ebf917ba354e4632b726323b2b32af3aa6c8de. --- .../hadoop/hbase/regionserver/ReplicationService.java | 11 +++++++---- .../hadoop/hbase/replication/TestReplicationBase.java | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index 33b3321755fa..e9bbaea8ae46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.yetus.audience.InterfaceAudience; /** @@ -32,11 +32,14 @@ */ @InterfaceAudience.Private public interface ReplicationService { + /** * Initializes the replication service object. + * @param walProvider can be null if not initialized inside a live region server environment, for + * example, {@code ReplicationSyncUp}. */ - void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALFactory walFactory) - throws IOException; + void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALProvider walProvider) + throws IOException; /** * Start replication services. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index eca0d675cb7b..955cd31d1788 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information From da5ebd6e3216cbb6a7b4f05b140ed14afb353807 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 6 Oct 2020 21:09:00 +0800 Subject: [PATCH 736/769] =?UTF-8?q?Revert=20"HBASE-24813=20ReplicationSour?= =?UTF-8?q?ce=20should=20clear=20buffer=20usage=20on=20Replicatio=E2=80=A6?= =?UTF-8?q?=20(#2191)"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 687e53b7e49c1a149e55829bbeca6aa4edfe69e7. 
--- .../ReplicationSourceWALReader.java | 3 +- .../regionserver/TestReplicationSource.java | 50 ++----------------- 2 files changed, 4 insertions(+), 49 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index be262a6d9504..9b93bf5f9de2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -60,8 +60,7 @@ class ReplicationSourceWALReader extends Thread { private final WALEntryFilter filter; private final ReplicationSource source; - @InterfaceAudience.Private - final BlockingQueue entryBatchQueue; + private final BlockingQueue entryBatchQueue; // max (heap) size of each batch - multiply by number of batches in queue to get total private final long replicationBatchSizeCapacity; // max count of each batch - multiply by number of batches in queue to get total diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index 50537b5e1be2..ce38e5409577 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -22,10 +22,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import java.io.IOException; -import java.util.ArrayList; import java.util.OptionalLong; import java.util.UUID; import java.util.concurrent.ExecutorService; @@ -274,47 +271,6 @@ public void testTerminateTimeout() throws Exception { } } - @Test - public void testTerminateClearsBuffer() throws Exception { - ReplicationSource source = new ReplicationSource(); - ReplicationSourceManager mockManager = mock(ReplicationSourceManager.class); - MetricsReplicationGlobalSourceSource mockMetrics = - mock(MetricsReplicationGlobalSourceSource.class); - AtomicLong buffer = new AtomicLong(); - Mockito.when(mockManager.getTotalBufferUsed()).thenReturn(buffer); - Mockito.when(mockManager.getGlobalMetrics()).thenReturn(mockMetrics); - ReplicationPeer mockPeer = mock(ReplicationPeer.class); - Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L); - Configuration testConf = HBaseConfiguration.create(); - source.init(testConf, null, mockManager, null, mockPeer, null, - "testPeer", null, p -> OptionalLong.empty(), mock(MetricsSource.class)); - ReplicationSourceWALReader reader = new ReplicationSourceWALReader(null, - conf, null, 0, null, source); - ReplicationSourceShipper shipper = - new ReplicationSourceShipper(conf, null, null, source); - shipper.entryReader = reader; - source.workerThreads.put("testPeer", shipper); - WALEntryBatch batch = new WALEntryBatch(10, logDir); - WAL.Entry mockEntry = mock(WAL.Entry.class); - WALEdit mockEdit = mock(WALEdit.class); - WALKeyImpl mockKey = mock(WALKeyImpl.class); - when(mockEntry.getEdit()).thenReturn(mockEdit); - when(mockEdit.isEmpty()).thenReturn(false); - when(mockEntry.getKey()).thenReturn(mockKey); - 
when(mockKey.estimatedSerializedSizeOf()).thenReturn(1000L); - when(mockEdit.heapSize()).thenReturn(10000L); - when(mockEdit.size()).thenReturn(0); - ArrayList cells = new ArrayList<>(); - KeyValue kv = new KeyValue(Bytes.toBytes("0001"), Bytes.toBytes("f"), - Bytes.toBytes("1"), Bytes.toBytes("v1")); - cells.add(kv); - when(mockEdit.getCells()).thenReturn(cells); - reader.addEntryToBatch(batch, mockEntry); - reader.entryBatchQueue.put(batch); - source.terminate("test"); - assertEquals(0, source.getSourceManager().getTotalBufferUsed().get()); - } - /** * Tests that recovered queues are preserved on a regionserver shutdown. * See HBASE-18192 @@ -484,12 +440,12 @@ public void testRecoveredReplicationSourceShipperGetPosition() throws Exception ServerName deadServer = ServerName.valueOf("www.deadServer.com", 12006, 1524679704419L); PriorityBlockingQueue queue = new PriorityBlockingQueue<>(); queue.put(new Path("/www/html/test")); - RecoveredReplicationSource source = mock(RecoveredReplicationSource.class); - Server server = mock(Server.class); + RecoveredReplicationSource source = Mockito.mock(RecoveredReplicationSource.class); + Server server = Mockito.mock(Server.class); Mockito.when(server.getServerName()).thenReturn(serverName); Mockito.when(source.getServer()).thenReturn(server); Mockito.when(source.getServerWALsBelongTo()).thenReturn(deadServer); - ReplicationQueueStorage storage = mock(ReplicationQueueStorage.class); + ReplicationQueueStorage storage = Mockito.mock(ReplicationQueueStorage.class); Mockito.when(storage.getWALPosition(Mockito.eq(serverName), Mockito.any(), Mockito.any())) .thenReturn(1001L); Mockito.when(storage.getWALPosition(Mockito.eq(deadServer), Mockito.any(), Mockito.any())) From b64b9cdcae0211f9007b09ffe88a835e88ec5a47 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Tue, 6 Oct 2020 12:00:56 -0700 Subject: [PATCH 737/769] HBASE-25144 Add Hadoop-3.3.0 to personality hadoopcheck (#2492) Now that Hadoop 3.3.0 is released, let's figure out where it goes in our testing matrix. Start by adding it to precommit checks. 
Signed-off-by: Michael Stack Signed-off-by: Matt Foley Signed-off-by: Jan Hentschel --- dev-support/hbase-personality.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 69e77201e9ca..b502f77b5c23 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -612,9 +612,9 @@ function hadoopcheck_rebuild elif [[ "${PATCH_BRANCH}" = branch-2.2 ]] || [[ "${PATCH_BRANCH}" = branch-2.3 ]]; then yetus_info "Setting Hadoop 3 versions to test based on branch-2.2/branch-2.3 rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.1.2 3.2.1" + hbase_hadoop3_versions="3.1.2 3.2.1 3.3.0" else - hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1" + hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1 3.3.0" fi else yetus_info "Setting Hadoop 3 versions to test based on branch-2.4+/master/feature branch rules" From 0a803855ac69a5b334f8f3e8d215fc997545224f Mon Sep 17 00:00:00 2001 From: ramkrish86 Date: Sun, 11 Oct 2020 10:46:06 +0530 Subject: [PATCH 738/769] HBASE-25065 WAL archival to be done by a separate thread (#2501) * HBASE-25065 WAL archival can be batched/throttled and also done by a separate thread * Fix checkstyle issues * Address review comments * checkstyle comments * Addressing final review comments Signed-off-by: Michael Stack --- .../java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java | 1 + .../org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java | 1 + 2 files changed, 2 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 720e2c26aa40..f96ddaf7eec1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -30,6 +30,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; import java.util.regex.Pattern; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java index f57ec31c531a..0243d740e655 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java @@ -35,6 +35,7 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.TableName; From fd82569a81016e527199a29da4f113bbad5ebcf5 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Fri, 30 Oct 2020 10:41:56 -0700 Subject: [PATCH 739/769] HBASE-25167 Normalizer support for hot config reloading (#2523) Wire up the `ConfigurationObserver` chain for `RegionNormalizerManager`. The following configuration keys support hot-reloading: * hbase.normalizer.throughput.max_bytes_per_sec * hbase.normalizer.split.enabled * hbase.normalizer.merge.enabled * hbase.normalizer.min.region.count * hbase.normalizer.merge.min_region_age.days * hbase.normalizer.merge.min_region_size.mb Note that support for `hbase.normalizer.period` is not provided here. 
Support would need to be implemented generally for the `Chore` subsystem. Signed-off-by: Bharath Vissapragada Signed-off-by: Viraj Jasani Signed-off-by: Aman Poonia --- .../java/org/apache/hadoop/hbase/conf/ConfigurationManager.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java index ad26f7633cb5..cb0348239986 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java @@ -25,6 +25,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Maintains the set of all the classes which would like to get notified From 1b51946d46baa178bcf49c86e32fd6f1be1b1846 Mon Sep 17 00:00:00 2001 From: stack Date: Thu, 12 Nov 2020 17:28:37 -0800 Subject: [PATCH 740/769] HBASE-25280 [meta replicas] ArrayIndexOutOfBoundsException in ZKConnectionRegistry Signed-off-by: Duo Zhang Signed-off-by: Huaxiang Sun --- .../hbase/client/ZKConnectionRegistry.java | 36 +++++++---- .../client/TestZKConnectionRegistry.java | 61 +++++++++++++++++-- 2 files changed, 79 insertions(+), 18 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 4b31c7a6c8a6..e1da8ef7ab68 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -26,7 +26,10 @@ import static org.apache.hadoop.hbase.zookeeper.ZKMetadata.removeMetaData; import java.io.IOException; +import java.util.Collection; import java.util.List; +import java.util.Map; +import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import org.apache.commons.lang3.mutable.MutableInt; @@ -111,7 +114,7 @@ private static ZooKeeperProtos.MetaRegionServer getMetaProto(byte[] data) throws data.length - prefixLen); } - private static void tryComplete(MutableInt remaining, HRegionLocation[] locs, + private static void tryComplete(MutableInt remaining, Collection locs, CompletableFuture future) { remaining.decrement(); if (remaining.intValue() > 0) { @@ -138,8 +141,15 @@ private void getMetaRegionLocation(CompletableFuture future, if (metaReplicaZNodes.isEmpty()) { future.completeExceptionally(new IOException("No meta znode available")); } - HRegionLocation[] locs = new HRegionLocation[metaReplicaZNodes.size()]; - MutableInt remaining = new MutableInt(locs.length); + // Note, the list of metaReplicaZNodes may be discontiguous regards replicaId; i.e. we may have + // a znode for the default -- replicaId=0 -- and perhaps replicaId '2' but be could be missing + // znode for replicaId '1'. This is a transient condition. Because of this we are careful + // accumulating locations. We use a Map so retries overwrite rather than aggregate and the + // Map sorts just to be kind to further processing. The Map will retain the discontinuity on + // replicaIds but on completion (of the future), the Map values are passed to the + // RegionLocations constructor which knows how to deal with discontinuities. 
+ final Map locs = new TreeMap(); + MutableInt remaining = new MutableInt(metaReplicaZNodes.size()); for (String metaReplicaZNode : metaReplicaZNodes) { int replicaId = znodePaths.getMetaReplicaIdFromZNode(metaReplicaZNode); String path = ZNodePaths.joinZNode(znodePaths.baseZNode, metaReplicaZNode); @@ -157,9 +167,9 @@ private void getMetaRegionLocation(CompletableFuture future, if (stateAndServerName.getFirst() != RegionState.State.OPEN) { LOG.warn("Meta region is in state " + stateAndServerName.getFirst()); } - locs[DEFAULT_REPLICA_ID] = new HRegionLocation( - getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond()); - tryComplete(remaining, locs, future); + locs.put(replicaId, new HRegionLocation( + getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond())); + tryComplete(remaining, locs.values(), future); }); } else { addListener(getAndConvert(path, ZKConnectionRegistry::getMetaProto), (proto, error) -> { @@ -168,23 +178,23 @@ private void getMetaRegionLocation(CompletableFuture future, } if (error != null) { LOG.warn("Failed to fetch " + path, error); - locs[replicaId] = null; + locs.put(replicaId, null); } else if (proto == null) { LOG.warn("Meta znode for replica " + replicaId + " is null"); - locs[replicaId] = null; + locs.put(replicaId, null); } else { Pair stateAndServerName = getStateAndServerName(proto); if (stateAndServerName.getFirst() != RegionState.State.OPEN) { LOG.warn("Meta region for replica " + replicaId + " is in state " + stateAndServerName.getFirst()); - locs[replicaId] = null; + locs.put(replicaId, null); } else { - locs[replicaId] = - new HRegionLocation(getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), - stateAndServerName.getSecond()); + locs.put(replicaId, new HRegionLocation( + getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), + stateAndServerName.getSecond())); } } - tryComplete(remaining, locs, future); + tryComplete(remaining, locs.values(), future); }); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 82cf0f9bbc97..d2895ed5415c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -24,26 +24,39 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; import static org.junit.Assert.fail; - import java.io.IOException; +import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.testclassification.ClientTests; import 
org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,6 +69,9 @@ public class TestZKConnectionRegistry { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestZKConnectionRegistry.class); + @Rule + public final TestName name = new TestName(); + static final Logger LOG = LoggerFactory.getLogger(TestZKConnectionRegistry.class); static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -83,8 +99,7 @@ public void test() throws InterruptedException, ExecutionException, IOException clusterId); assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(), REGISTRY.getActiveMaster().get()); - RegionReplicaTestHelper - .waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); + RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); RegionLocations locs = REGISTRY.getMetaRegionLocations().get(); assertEquals(3, locs.getRegionLocations().length); IntStream.range(0, 3).forEach(i -> { @@ -102,8 +117,8 @@ public void testIndependentZKConnections() throws IOException { otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST); try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf)) { ReadOnlyZKClient zk2 = otherRegistry.getZKClient(); - assertNotSame("Using a different configuration / quorum should result in different " + - "backing zk connection.", zk1, zk2); + assertNotSame("Using a different configuration / quorum should result in different " + + "backing zk connection.", zk1, zk2); assertNotEquals( "Using a different configrution / quorum should be reflected in the zk connection.", zk1.getConnectString(), zk2.getConnectString()); @@ -126,4 +141,40 @@ public void testNoMetaAvailable() throws InterruptedException { } } } + + /** + * Pass discontinuous list of znodes to registry getMetaRegionLocation. Should work fine. + * It used to throw ArrayOutOfBoundsException. See HBASE-25280. + */ + @Test + public void testDiscontinuousLocations() + throws ExecutionException, InterruptedException, IOException, KeeperException, + TimeoutException { + // Write discontinuous meta replica locations to a zk namespace particular to this test to + // avoid polluting other tests. + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + this.name.getMethodName()); + ZooKeeperProtos.MetaRegionServer pbrsr = ZooKeeperProtos.MetaRegionServer.newBuilder() + .setServer(ProtobufUtil.toServerName(ServerName.valueOf("example.org,1,1"))) + .setRpcVersion(HConstants.RPC_CURRENT_VERSION) + .setState(RegionState.State.OPEN.convert()).build(); + byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); + try (ZKWatcher zkw = new ZKWatcher(conf, this.name.getMethodName(), new Abortable() { + @Override public void abort(String why, Throwable e) {} + @Override public boolean isAborted() { + return false; + } + })) { + // Write default replica and then a replica for replicaId #3. 
+ ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(0), data); + ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(3), data); + List znodes = zkw.getMetaReplicaNodes(); + assertEquals(2, znodes.size()); + try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf)) { + CompletableFuture cf = registry.getMetaRegionLocations(); + RegionLocations locations = cf.get(60, TimeUnit.SECONDS); + assertEquals(2, locations.numNonNullElements()); + } + } + } } From 9ed0f4161732aee698e088dc6ea55790208b52c8 Mon Sep 17 00:00:00 2001 From: stack Date: Mon, 16 Nov 2020 08:44:28 -0800 Subject: [PATCH 741/769] Revert "HBASE-25280 [meta replicas] ArrayIndexOutOfBoundsException in ZKConnectionRegistry" This reverts commit adbf81c010a5fbfebfcc4aae68e35dbd12da1601. Premature. Still some comments to address. --- .../hbase/client/ZKConnectionRegistry.java | 36 ++++------- .../client/TestZKConnectionRegistry.java | 61 ++----------------- 2 files changed, 18 insertions(+), 79 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index e1da8ef7ab68..4b31c7a6c8a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -26,10 +26,7 @@ import static org.apache.hadoop.hbase.zookeeper.ZKMetadata.removeMetaData; import java.io.IOException; -import java.util.Collection; import java.util.List; -import java.util.Map; -import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import org.apache.commons.lang3.mutable.MutableInt; @@ -114,7 +111,7 @@ private static ZooKeeperProtos.MetaRegionServer getMetaProto(byte[] data) throws data.length - prefixLen); } - private static void tryComplete(MutableInt remaining, Collection locs, + private static void tryComplete(MutableInt remaining, HRegionLocation[] locs, CompletableFuture future) { remaining.decrement(); if (remaining.intValue() > 0) { @@ -141,15 +138,8 @@ private void getMetaRegionLocation(CompletableFuture future, if (metaReplicaZNodes.isEmpty()) { future.completeExceptionally(new IOException("No meta znode available")); } - // Note, the list of metaReplicaZNodes may be discontiguous regards replicaId; i.e. we may have - // a znode for the default -- replicaId=0 -- and perhaps replicaId '2' but be could be missing - // znode for replicaId '1'. This is a transient condition. Because of this we are careful - // accumulating locations. We use a Map so retries overwrite rather than aggregate and the - // Map sorts just to be kind to further processing. The Map will retain the discontinuity on - // replicaIds but on completion (of the future), the Map values are passed to the - // RegionLocations constructor which knows how to deal with discontinuities. 
- final Map locs = new TreeMap(); - MutableInt remaining = new MutableInt(metaReplicaZNodes.size()); + HRegionLocation[] locs = new HRegionLocation[metaReplicaZNodes.size()]; + MutableInt remaining = new MutableInt(locs.length); for (String metaReplicaZNode : metaReplicaZNodes) { int replicaId = znodePaths.getMetaReplicaIdFromZNode(metaReplicaZNode); String path = ZNodePaths.joinZNode(znodePaths.baseZNode, metaReplicaZNode); @@ -167,9 +157,9 @@ private void getMetaRegionLocation(CompletableFuture future, if (stateAndServerName.getFirst() != RegionState.State.OPEN) { LOG.warn("Meta region is in state " + stateAndServerName.getFirst()); } - locs.put(replicaId, new HRegionLocation( - getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond())); - tryComplete(remaining, locs.values(), future); + locs[DEFAULT_REPLICA_ID] = new HRegionLocation( + getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond()); + tryComplete(remaining, locs, future); }); } else { addListener(getAndConvert(path, ZKConnectionRegistry::getMetaProto), (proto, error) -> { @@ -178,23 +168,23 @@ private void getMetaRegionLocation(CompletableFuture future, } if (error != null) { LOG.warn("Failed to fetch " + path, error); - locs.put(replicaId, null); + locs[replicaId] = null; } else if (proto == null) { LOG.warn("Meta znode for replica " + replicaId + " is null"); - locs.put(replicaId, null); + locs[replicaId] = null; } else { Pair stateAndServerName = getStateAndServerName(proto); if (stateAndServerName.getFirst() != RegionState.State.OPEN) { LOG.warn("Meta region for replica " + replicaId + " is in state " + stateAndServerName.getFirst()); - locs.put(replicaId, null); + locs[replicaId] = null; } else { - locs.put(replicaId, new HRegionLocation( - getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), - stateAndServerName.getSecond())); + locs[replicaId] = + new HRegionLocation(getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), + stateAndServerName.getSecond()); } } - tryComplete(remaining, locs.values(), future); + tryComplete(remaining, locs, future); }); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index d2895ed5415c..82cf0f9bbc97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -24,39 +24,26 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; import static org.junit.Assert.fail; + import java.io.IOException; -import java.util.List; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,9 +56,6 @@ public class TestZKConnectionRegistry { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestZKConnectionRegistry.class); - @Rule - public final TestName name = new TestName(); - static final Logger LOG = LoggerFactory.getLogger(TestZKConnectionRegistry.class); static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -99,7 +83,8 @@ public void test() throws InterruptedException, ExecutionException, IOException clusterId); assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(), REGISTRY.getActiveMaster().get()); - RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); + RegionReplicaTestHelper + .waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); RegionLocations locs = REGISTRY.getMetaRegionLocations().get(); assertEquals(3, locs.getRegionLocations().length); IntStream.range(0, 3).forEach(i -> { @@ -117,8 +102,8 @@ public void testIndependentZKConnections() throws IOException { otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST); try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf)) { ReadOnlyZKClient zk2 = otherRegistry.getZKClient(); - assertNotSame("Using a different configuration / quorum should result in different " - + "backing zk connection.", zk1, zk2); + assertNotSame("Using a different configuration / quorum should result in different " + + "backing zk connection.", zk1, zk2); assertNotEquals( "Using a different configrution / quorum should be reflected in the zk connection.", zk1.getConnectString(), zk2.getConnectString()); @@ -141,40 +126,4 @@ public void testNoMetaAvailable() throws InterruptedException { } } } - - /** - * Pass discontinuous list of znodes to registry getMetaRegionLocation. Should work fine. - * It used to throw ArrayOutOfBoundsException. See HBASE-25280. - */ - @Test - public void testDiscontinuousLocations() - throws ExecutionException, InterruptedException, IOException, KeeperException, - TimeoutException { - // Write discontinuous meta replica locations to a zk namespace particular to this test to - // avoid polluting other tests. 
- Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + this.name.getMethodName()); - ZooKeeperProtos.MetaRegionServer pbrsr = ZooKeeperProtos.MetaRegionServer.newBuilder() - .setServer(ProtobufUtil.toServerName(ServerName.valueOf("example.org,1,1"))) - .setRpcVersion(HConstants.RPC_CURRENT_VERSION) - .setState(RegionState.State.OPEN.convert()).build(); - byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); - try (ZKWatcher zkw = new ZKWatcher(conf, this.name.getMethodName(), new Abortable() { - @Override public void abort(String why, Throwable e) {} - @Override public boolean isAborted() { - return false; - } - })) { - // Write default replica and then a replica for replicaId #3. - ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(0), data); - ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(3), data); - List znodes = zkw.getMetaReplicaNodes(); - assertEquals(2, znodes.size()); - try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf)) { - CompletableFuture cf = registry.getMetaRegionLocations(); - RegionLocations locations = cf.get(60, TimeUnit.SECONDS); - assertEquals(2, locations.numNonNullElements()); - } - } - } } From 4db5adeb1f1a5f646986d11c5eac0df06d722fef Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Mon, 16 Nov 2020 08:58:05 -0800 Subject: [PATCH 742/769] HBASE-25280 [meta replicas] ArrayIndexOutOfBoundsException in ZKConnectionRegistry (#2652) Signed-off-by: Duo Zhang Signed-off-by: Huaxiang Sun --- .../hbase/client/ZKConnectionRegistry.java | 36 +++++++---- .../client/TestZKConnectionRegistry.java | 61 +++++++++++++++++-- 2 files changed, 79 insertions(+), 18 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index 4b31c7a6c8a6..abfef3fe02a8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -26,7 +26,10 @@ import static org.apache.hadoop.hbase.zookeeper.ZKMetadata.removeMetaData; import java.io.IOException; +import java.util.Collection; import java.util.List; +import java.util.Map; +import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import org.apache.commons.lang3.mutable.MutableInt; @@ -111,7 +114,7 @@ private static ZooKeeperProtos.MetaRegionServer getMetaProto(byte[] data) throws data.length - prefixLen); } - private static void tryComplete(MutableInt remaining, HRegionLocation[] locs, + private static void tryComplete(MutableInt remaining, Collection locs, CompletableFuture future) { remaining.decrement(); if (remaining.intValue() > 0) { @@ -138,8 +141,15 @@ private void getMetaRegionLocation(CompletableFuture future, if (metaReplicaZNodes.isEmpty()) { future.completeExceptionally(new IOException("No meta znode available")); } - HRegionLocation[] locs = new HRegionLocation[metaReplicaZNodes.size()]; - MutableInt remaining = new MutableInt(locs.length); + // Note, the list of metaReplicaZNodes may be discontiguous regards replicaId; i.e. we may have + // a znode for the default -- replicaId=0 -- and perhaps replicaId '2' but be could be missing + // znode for replicaId '1'. This is a transient condition. Because of this we are careful + // accumulating locations. 
We use a Map so retries overwrite rather than aggregate and the + // Map sorts just to be kind to further processing. The Map will retain the discontinuity on + // replicaIds but on completion (of the future), the Map values are passed to the + // RegionLocations constructor which knows how to deal with discontinuities. + final Map locs = new TreeMap<>(); + MutableInt remaining = new MutableInt(metaReplicaZNodes.size()); for (String metaReplicaZNode : metaReplicaZNodes) { int replicaId = znodePaths.getMetaReplicaIdFromZNode(metaReplicaZNode); String path = ZNodePaths.joinZNode(znodePaths.baseZNode, metaReplicaZNode); @@ -157,9 +167,9 @@ private void getMetaRegionLocation(CompletableFuture future, if (stateAndServerName.getFirst() != RegionState.State.OPEN) { LOG.warn("Meta region is in state " + stateAndServerName.getFirst()); } - locs[DEFAULT_REPLICA_ID] = new HRegionLocation( - getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond()); - tryComplete(remaining, locs, future); + locs.put(replicaId, new HRegionLocation( + getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond())); + tryComplete(remaining, locs.values(), future); }); } else { addListener(getAndConvert(path, ZKConnectionRegistry::getMetaProto), (proto, error) -> { @@ -168,23 +178,23 @@ private void getMetaRegionLocation(CompletableFuture future, } if (error != null) { LOG.warn("Failed to fetch " + path, error); - locs[replicaId] = null; + locs.put(replicaId, null); } else if (proto == null) { LOG.warn("Meta znode for replica " + replicaId + " is null"); - locs[replicaId] = null; + locs.put(replicaId, null); } else { Pair stateAndServerName = getStateAndServerName(proto); if (stateAndServerName.getFirst() != RegionState.State.OPEN) { LOG.warn("Meta region for replica " + replicaId + " is in state " + stateAndServerName.getFirst()); - locs[replicaId] = null; + locs.put(replicaId, null); } else { - locs[replicaId] = - new HRegionLocation(getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), - stateAndServerName.getSecond()); + locs.put(replicaId, new HRegionLocation( + getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), + stateAndServerName.getSecond())); } } - tryComplete(remaining, locs, future); + tryComplete(remaining, locs.values(), future); }); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 82cf0f9bbc97..d033f484bee4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -24,26 +24,39 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; import static org.junit.Assert.fail; - import java.io.IOException; +import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; import 
org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,6 +69,9 @@ public class TestZKConnectionRegistry { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestZKConnectionRegistry.class); + @Rule + public final TestName name = new TestName(); + static final Logger LOG = LoggerFactory.getLogger(TestZKConnectionRegistry.class); static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -83,8 +99,7 @@ public void test() throws InterruptedException, ExecutionException, IOException clusterId); assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(), REGISTRY.getActiveMaster().get()); - RegionReplicaTestHelper - .waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); + RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); RegionLocations locs = REGISTRY.getMetaRegionLocations().get(); assertEquals(3, locs.getRegionLocations().length); IntStream.range(0, 3).forEach(i -> { @@ -102,8 +117,8 @@ public void testIndependentZKConnections() throws IOException { otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST); try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf)) { ReadOnlyZKClient zk2 = otherRegistry.getZKClient(); - assertNotSame("Using a different configuration / quorum should result in different " + - "backing zk connection.", zk1, zk2); + assertNotSame("Using a different configuration / quorum should result in " + + "different backing zk connection.", zk1, zk2); assertNotEquals( "Using a different configrution / quorum should be reflected in the zk connection.", zk1.getConnectString(), zk2.getConnectString()); @@ -126,4 +141,40 @@ public void testNoMetaAvailable() throws InterruptedException { } } } + + /** + * Pass discontinuous list of znodes to registry getMetaRegionLocation. Should work fine. + * It used to throw ArrayOutOfBoundsException. See HBASE-25280. + */ + @Test + public void testDiscontinuousLocations() + throws ExecutionException, InterruptedException, IOException, KeeperException, + TimeoutException { + // Write discontinuous meta replica locations to a zk namespace particular to this test to + // avoid polluting other tests. 
+ Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + this.name.getMethodName()); + ZooKeeperProtos.MetaRegionServer pbrsr = ZooKeeperProtos.MetaRegionServer.newBuilder() + .setServer(ProtobufUtil.toServerName(ServerName.valueOf("example.org,1,1"))) + .setRpcVersion(HConstants.RPC_CURRENT_VERSION) + .setState(RegionState.State.OPEN.convert()).build(); + byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); + try (ZKWatcher zkw = new ZKWatcher(conf, this.name.getMethodName(), new Abortable() { + @Override public void abort(String why, Throwable e) {} + @Override public boolean isAborted() { + return false; + } + })) { + // Write default replica and then a replica for replicaId #3. + ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(0), data); + ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(3), data); + List znodes = zkw.getMetaReplicaNodes(); + assertEquals(2, znodes.size()); + try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf)) { + CompletableFuture cf = registry.getMetaRegionLocations(); + RegionLocations locations = cf.get(60, TimeUnit.SECONDS); + assertEquals(2, locations.numNonNullElements()); + } + } + } } From f3f3dd9a52c68ae92aebe9bdf500009e64febd82 Mon Sep 17 00:00:00 2001 From: stack Date: Mon, 16 Nov 2020 08:59:12 -0800 Subject: [PATCH 743/769] Revert "HBASE-25280 [meta replicas] ArrayIndexOutOfBoundsException in ZKConnectionRegistry (#2652)" This reverts commit 6210dafc47a04306c1d90dd3e7395015f442a81e. Applied to master when should have been applied to branch. Revert. --- .../hbase/client/ZKConnectionRegistry.java | 36 ++++------- .../client/TestZKConnectionRegistry.java | 61 ++----------------- 2 files changed, 18 insertions(+), 79 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index abfef3fe02a8..4b31c7a6c8a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -26,10 +26,7 @@ import static org.apache.hadoop.hbase.zookeeper.ZKMetadata.removeMetaData; import java.io.IOException; -import java.util.Collection; import java.util.List; -import java.util.Map; -import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import org.apache.commons.lang3.mutable.MutableInt; @@ -114,7 +111,7 @@ private static ZooKeeperProtos.MetaRegionServer getMetaProto(byte[] data) throws data.length - prefixLen); } - private static void tryComplete(MutableInt remaining, Collection locs, + private static void tryComplete(MutableInt remaining, HRegionLocation[] locs, CompletableFuture future) { remaining.decrement(); if (remaining.intValue() > 0) { @@ -141,15 +138,8 @@ private void getMetaRegionLocation(CompletableFuture future, if (metaReplicaZNodes.isEmpty()) { future.completeExceptionally(new IOException("No meta znode available")); } - // Note, the list of metaReplicaZNodes may be discontiguous regards replicaId; i.e. we may have - // a znode for the default -- replicaId=0 -- and perhaps replicaId '2' but be could be missing - // znode for replicaId '1'. This is a transient condition. Because of this we are careful - // accumulating locations. 
We use a Map so retries overwrite rather than aggregate and the - // Map sorts just to be kind to further processing. The Map will retain the discontinuity on - // replicaIds but on completion (of the future), the Map values are passed to the - // RegionLocations constructor which knows how to deal with discontinuities. - final Map locs = new TreeMap<>(); - MutableInt remaining = new MutableInt(metaReplicaZNodes.size()); + HRegionLocation[] locs = new HRegionLocation[metaReplicaZNodes.size()]; + MutableInt remaining = new MutableInt(locs.length); for (String metaReplicaZNode : metaReplicaZNodes) { int replicaId = znodePaths.getMetaReplicaIdFromZNode(metaReplicaZNode); String path = ZNodePaths.joinZNode(znodePaths.baseZNode, metaReplicaZNode); @@ -167,9 +157,9 @@ private void getMetaRegionLocation(CompletableFuture future, if (stateAndServerName.getFirst() != RegionState.State.OPEN) { LOG.warn("Meta region is in state " + stateAndServerName.getFirst()); } - locs.put(replicaId, new HRegionLocation( - getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond())); - tryComplete(remaining, locs.values(), future); + locs[DEFAULT_REPLICA_ID] = new HRegionLocation( + getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond()); + tryComplete(remaining, locs, future); }); } else { addListener(getAndConvert(path, ZKConnectionRegistry::getMetaProto), (proto, error) -> { @@ -178,23 +168,23 @@ private void getMetaRegionLocation(CompletableFuture future, } if (error != null) { LOG.warn("Failed to fetch " + path, error); - locs.put(replicaId, null); + locs[replicaId] = null; } else if (proto == null) { LOG.warn("Meta znode for replica " + replicaId + " is null"); - locs.put(replicaId, null); + locs[replicaId] = null; } else { Pair stateAndServerName = getStateAndServerName(proto); if (stateAndServerName.getFirst() != RegionState.State.OPEN) { LOG.warn("Meta region for replica " + replicaId + " is in state " + stateAndServerName.getFirst()); - locs.put(replicaId, null); + locs[replicaId] = null; } else { - locs.put(replicaId, new HRegionLocation( - getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), - stateAndServerName.getSecond())); + locs[replicaId] = + new HRegionLocation(getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), + stateAndServerName.getSecond()); } } - tryComplete(remaining, locs.values(), future); + tryComplete(remaining, locs, future); }); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index d033f484bee4..82cf0f9bbc97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -24,39 +24,26 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; import static org.junit.Assert.fail; + import java.io.IOException; -import java.util.List; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,9 +56,6 @@ public class TestZKConnectionRegistry { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestZKConnectionRegistry.class); - @Rule - public final TestName name = new TestName(); - static final Logger LOG = LoggerFactory.getLogger(TestZKConnectionRegistry.class); static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -99,7 +83,8 @@ public void test() throws InterruptedException, ExecutionException, IOException clusterId); assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(), REGISTRY.getActiveMaster().get()); - RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); + RegionReplicaTestHelper + .waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); RegionLocations locs = REGISTRY.getMetaRegionLocations().get(); assertEquals(3, locs.getRegionLocations().length); IntStream.range(0, 3).forEach(i -> { @@ -117,8 +102,8 @@ public void testIndependentZKConnections() throws IOException { otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST); try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf)) { ReadOnlyZKClient zk2 = otherRegistry.getZKClient(); - assertNotSame("Using a different configuration / quorum should result in " + - "different backing zk connection.", zk1, zk2); + assertNotSame("Using a different configuration / quorum should result in different " + + "backing zk connection.", zk1, zk2); assertNotEquals( "Using a different configrution / quorum should be reflected in the zk connection.", zk1.getConnectString(), zk2.getConnectString()); @@ -141,40 +126,4 @@ public void testNoMetaAvailable() throws InterruptedException { } } } - - /** - * Pass discontinuous list of znodes to registry getMetaRegionLocation. Should work fine. - * It used to throw ArrayOutOfBoundsException. See HBASE-25280. - */ - @Test - public void testDiscontinuousLocations() - throws ExecutionException, InterruptedException, IOException, KeeperException, - TimeoutException { - // Write discontinuous meta replica locations to a zk namespace particular to this test to - // avoid polluting other tests. 
- Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + this.name.getMethodName()); - ZooKeeperProtos.MetaRegionServer pbrsr = ZooKeeperProtos.MetaRegionServer.newBuilder() - .setServer(ProtobufUtil.toServerName(ServerName.valueOf("example.org,1,1"))) - .setRpcVersion(HConstants.RPC_CURRENT_VERSION) - .setState(RegionState.State.OPEN.convert()).build(); - byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); - try (ZKWatcher zkw = new ZKWatcher(conf, this.name.getMethodName(), new Abortable() { - @Override public void abort(String why, Throwable e) {} - @Override public boolean isAborted() { - return false; - } - })) { - // Write default replica and then a replica for replicaId #3. - ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(0), data); - ZKUtil.createSetData(zkw, zkw.getZNodePaths().getZNodeForReplica(3), data); - List znodes = zkw.getMetaReplicaNodes(); - assertEquals(2, znodes.size()); - try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf)) { - CompletableFuture cf = registry.getMetaRegionLocations(); - RegionLocations locations = cf.get(60, TimeUnit.SECONDS); - assertEquals(2, locations.numNonNullElements()); - } - } - } } From 258657a9d03621128eb3f872864b3d1588fa1cb9 Mon Sep 17 00:00:00 2001 From: stack Date: Fri, 18 Sep 2020 17:29:23 -0700 Subject: [PATCH 744/769] HBASE-25068 Pass WALFactory to Replication so it knows of all WALProviders, not just default/user-space Pass WALFactory to Replication instead of WALProvider. WALFactory has all WALProviders in it, not just the user-space WALProvider. Do this so ReplicationService has access to all WALProviders in the Server (To be exploited by the follow-on patch in HBASE-25055) --- .../hadoop/hbase/regionserver/ReplicationService.java | 11 ++++------- .../hbase/replication/regionserver/Replication.java | 1 + .../hadoop/hbase/replication/TestReplicationBase.java | 2 +- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index e9bbaea8ae46..33b3321755fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; -import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.yetus.audience.InterfaceAudience; /** @@ -32,14 +32,11 @@ */ @InterfaceAudience.Private public interface ReplicationService { - /** * Initializes the replication service object. - * @param walProvider can be null if not initialized inside a live region server environment, for - * example, {@code ReplicationSyncUp}. */ - void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALProvider walProvider) - throws IOException; + void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, WALFactory walFactory) + throws IOException; /** * Start replication services. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index c6b05b427082..3f9b30a7fede 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -115,6 +115,7 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager(); this.globalMetricsSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); + WALProvider walProvider = walFactory.getWALProvider(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId, walFactory, mapping, globalMetricsSource); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 955cd31d1788..eca0d675cb7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information From bbe2c7b0556a7d2a6c7e3d7a411e23a0ccde89d3 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Fri, 2 Oct 2020 12:29:18 -0700 Subject: [PATCH 745/769] =?UTF-8?q?HBASE-25055=20Add=20ReplicationSource?= =?UTF-8?q?=20for=20meta=20WALs;=20add=20enable/disable=20w=E2=80=A6=20(#2?= =?UTF-8?q?451)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * HBASE-25055 Add ReplicationSource for meta WALs; add enable/disable when hbase:meta assigned to RS Fill in gap left by HBASE-11183 'Timeline Consistent region replicas - Phase 2 design'. HBASE-11183 left off implementing 'async WAL Replication' on the hbase:meta Table; hbase:meta Table could only do Phase 1 Region Replicas reading the primary Regions' hfiles. Here we add 'async WAL Replication' to hbase:meta so Replicas can be more current with the primary's changes. Adds a 'special' ReplicationSource that reads hbase:meta WAL files and replicates all edits to the configured in-cluster endpoint (Defaults to the RegionReadReplicaEndpoint.class -- set hbase.region.replica.catalog.replication to target a different endpoint implementation). Set hbase.region.replica.replication.catalog.enabled to enable async WAL Replication for hbase:meta region replicas. It's off by default. The CatalogReplicationSource for async WAL Replication of hbase:meta does NOT need to keep up WAL offset or a queue of WALs-to-replicate in the replication queue store as is done in other ReplicationSource implementations; the CatalogReplicationSource is for Region Replicas only. General Replication does not replicate hbase:meta. hbase:meta Region Replicas reset on crash of the primary replica so there is no need to 'recover' replication that was running on the crashed server. Because it is so different in operation, the CatalogReplicationSource is bolted on to the side of the ReplicationSourceManager.
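For illustration only, a minimal sketch of flipping the feature on through site configuration. It uses just the two keys named above; the endpoint class in the final line is a hypothetical placeholder, not a class this patch ships.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class EnableCatalogReplication {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Off by default; turns on async WAL Replication for hbase:meta region replicas.
    conf.setBoolean("hbase.region.replica.replication.catalog.enabled", true);
    // Optional: target an endpoint other than the default. The value below is a
    // made-up example class name, not part of this patch.
    conf.set("hbase.region.replica.catalog.replication", "org.example.MyCatalogEndpoint");
  }
}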
The CatalogReplicationSource is lazily instantiated to match the lazy instantiation of the hbase:meta WALProvider, created and started on the open of the first Region of an hbase:meta table. Thereafter it stays up till the process dies, even if all hbase:meta Regions have moved off the server, in case a hbase:meta Region is moved back (Doing it this way simplifies the implementation) hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Read configuration to see if we need to wait on setting a Region read-enabled (if so, replicas will only flip to enable read after confirming a flush of the primary so they for sure are a replica of a known point) hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java If configured, on open of hbase:meta, ask the ReplicationSourceManager to add a ReplicationSource (if it hasn't already). hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java Edit log message. hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java If configured, on close of hbase:meta, update ReplicationSourceManager that a source Region has closed. hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java javadoc and make constructor private. hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java Add logPositionAndCleanOldLogs w/ default of the old behavior so CatalogReplicationSource can bypass updating store with WAL position, etc. hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java Add creation and start of a CatalogReplicationSource. hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java Go via ReplicationSource when calling logPositionAndCleanOldLogs so new RS can intercept. hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java Javadoc. hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java Add utility for reading configurations for hbase:meta region replicas. hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java Javadoc. hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java Use define. hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSource.java Special version of ReplicationSource for Region Replicas on hbase:meta. hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/CatalogReplicationSourcePeer.java Needs a special peer too (peers are baked into replication though we don't use 'peers' here) hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplicationEndpoint.java hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java Tests.
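A rough shape of that lazy, once-per-process instantiation, as a hedged sketch; the holder class, field, and method names below are illustrative stand-ins, not the actual members ReplicationSourceManager grows in this patch.

import java.io.IOException;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface;

abstract class CatalogSourceHolder {
  // Created once, on first open of an hbase:meta Region, then kept for the
  // life of the process even if all hbase:meta Regions move away.
  private volatile ReplicationSourceInterface catalogSource;

  // Hypothetical factory: builds a source that tails the hbase:meta WALProvider
  // and keeps no queue state or WAL offsets in the replication queue store.
  abstract ReplicationSourceInterface createCatalogReplicationSource() throws IOException;

  ReplicationSourceInterface getOrCreateCatalogSource() throws IOException {
    ReplicationSourceInterface source = this.catalogSource;
    if (source == null) {
      synchronized (this) {
        source = this.catalogSource;
        if (source == null) {
          source = createCatalogReplicationSource();
          this.catalogSource = source;
        }
      }
    }
    return source;
  }
}

Double-checked locking on a volatile field keeps the common path lock-free once the source exists, which suits a member that is created at most once per process.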
Signed-off-by: Duo Zhang Signed-off-by: Huaxiang Sun --- .../hadoop/hbase/replication/regionserver/Replication.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 3f9b30a7fede..c6b05b427082 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -115,7 +115,6 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager(); this.globalMetricsSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); - WALProvider walProvider = walFactory.getWALProvider(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId, walFactory, mapping, globalMetricsSource); From 4e433c16478f81617d16bc33c645106c32585a84 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 25 Nov 2020 14:10:12 -0800 Subject: [PATCH 746/769] HBASE-24640 Purge use of VisibleForTesting (#2695) Signed-off-by: Reid Chan Signed-off-by: Nick Dimiduk --- .../java/org/apache/hadoop/hbase/conf/ConfigurationManager.java | 1 - .../java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java | 1 - .../org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java | 1 - 3 files changed, 3 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java index cb0348239986..ad26f7633cb5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java @@ -25,7 +25,6 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Maintains the set of all the classes which would like to get notified diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index f96ddaf7eec1..720e2c26aa40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -30,7 +30,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java index 0243d740e655..f57ec31c531a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java @@ -35,7 +35,6 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import 
java.util.stream.Stream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.TableName; From aa4ddf5e54c5ec98e6104add7b606d21003b679a Mon Sep 17 00:00:00 2001 From: Qi Yu Date: Fri, 27 Nov 2020 08:20:24 +0800 Subject: [PATCH 747/769] HBASE-25324 Remove unnecessary array to list conversion in SplitLogManager (#2703) Signed-off-by: Guanghao Zhang Signed-off-by: Viraj Jasani Signed-off-by: stack --- .../java/org/apache/hadoop/hbase/master/SplitWALManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index e72b607a7f8e..6db094c4e6df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WALSplitUtil; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; From e77a4b098cd20cb62a678f09e595030b7c0a1c68 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Sat, 28 Nov 2020 05:01:22 -0800 Subject: [PATCH 748/769] HBASE-25292 Improve InetSocketAddress usage discipline (#2669) Network identities should be bound late. Remote addresses should be resolved at the last possible moment, just before connect(). Network identity mappings can change, so our code should not inappropriately cache them. Otherwise we might miss a change and fail to operate normally. Revert "HBASE-14544 Allow HConnectionImpl to not refresh the dns on errors" Removes hbase.resolve.hostnames.on.failure and related code. We always resolve hostnames, as late as possible. Preserve InetSocketAddress caching per RPC connection. Avoids potential lookups per Call. Replace InetSocketAddress with Address where used as a map key. If we want to key by hostname and/or resolved address we should be explicit about it. Using Address chooses mapping by hostname and port only. Add metrics for potential nameservice resolution attempts, whenever an InetSocketAddress is instantiated for connect; and metrics for failed resolution, whenever InetSocketAddress#isUnresolved on the new instance is true. * Use ServerName directly to build a stub key * Resolve and cache ISA on a RpcChannel as late as possible, at first call * Remove now invalid unit test TestCIBadHostname We resolve DNS at the latest possible time, at first call, and do not resolve hostnames for creating stubs at all, so this unit test cannot work now. 
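Reduced to a sketch, the discipline above looks like the following: key any cache or stub map by hostname plus port only (Address), and build the InetSocketAddress, whose construction is what attempts the nameservice lookup, at the last possible moment before connect(). This is an illustrative fragment under those assumptions, not the patch's actual code.

import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import org.apache.hadoop.hbase.net.Address;

public final class LateBindingExample {
  // Callers key caches by Address (hostname + port), never by a resolved IP.
  static InetSocketAddress resolveJustBeforeConnect(Address key) throws UnknownHostException {
    // Constructing the InetSocketAddress triggers resolution, so do it
    // immediately before connect() and on every reconnect, picking up
    // any nameservice mapping change.
    InetSocketAddress remote = new InetSocketAddress(key.getHostname(), key.getPort());
    if (remote.isUnresolved()) {
      // A real implementation would also bump the new failed-resolution metric here.
      throw new UnknownHostException("can not resolve " + key);
    }
    return remote;
  }
}

A caller would build the key once, e.g. Address.fromParts(serverName.getHostname(), serverName.getPort()), and call resolveJustBeforeConnect on each connection attempt rather than caching its result.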
Reviewed-by: Mingliang Liu Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/client/AsyncConnectionImpl.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 8a1ac5aac76d..5ed5b9ed6fff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -28,6 +28,8 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; import java.net.SocketAddress; import java.util.Optional; import java.util.concurrent.CompletableFuture; From 768eef65e481778b7ea153cae7b85c1f1075e810 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 3 Dec 2020 21:16:24 +0530 Subject: [PATCH 749/769] Revert "HBASE-25246 Backup/Restore hbase cell tags" This reverts commit 56eb5c9fc8de796665fe959087cb24b9f5f1afd4. --- .../main/java/org/apache/hadoop/hbase/mapreduce/Import.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 30071fdfd809..239a12bdc688 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -512,7 +511,6 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { - List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -526,8 +524,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength(), // value length - tags.size() == 0 ? null: tags); + kv.getValueLength()); // value length } } return kv; From a1c23b68caaf71789a2768e76eb0377759498455 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 9 Dec 2020 16:54:18 -0800 Subject: [PATCH 750/769] HBASE-25380 [create-release] Add timestamping to log output (#2758) Added logging of timestamp so we can tell where we are spending time. Added context to the README copied from head of entrance script. 
Signed-off-by: Andrew Purtell --- dev-support/create-release/release-util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index d907253dffe6..0e9eea174979 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -512,7 +512,7 @@ function get_jira_name { if [[ -z "$jira_name" ]]; then error "Sorry, can't determine the Jira name for project $project" fi - echo "$jira_name" + log "$jira_name" } # Update the CHANGES.md From 7bfdf416b0a93106685623c403a235169d0b710a Mon Sep 17 00:00:00 2001 From: Huaxiang Sun Date: Thu, 10 Dec 2020 10:12:53 -0800 Subject: [PATCH 751/769] Revert "HBASE-25293 Followup jira to address the client handling issue when chaning from meta replica to non-meta-replica at the server side." This reverts commit c1aa3b24e930e2c47ff4d7f6e286cb450458dffc. --- .../client/AsyncNonMetaRegionLocator.java | 2 +- .../CatalogReplicaLoadBalanceSelector.java | 2 -- ...talogReplicaLoadBalanceSimpleSelector.java | 19 ++++++++++--------- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 1c686aca8b76..2c2520f8bd12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -211,7 +211,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + int numOfReplicas = 1; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index 27be88a9def2..c3ce868757f1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -28,8 +28,6 @@ @InterfaceAudience.Private interface CatalogReplicaLoadBalanceSelector { - int UNINITIALIZED_NUM_OF_REPLICAS = -1; - /** * This method is called when input location is stale, i.e, when clients run into * org.apache.hadoop.hbase.NotServingRegionException. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index 01996b34e2ef..bc8264050149 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -108,6 +108,7 @@ public String toString() { private final TableName tableName; private final IntSupplier getNumOfReplicas; private volatile boolean isStopped = false; + private final static int UNINITIALIZED_NUM_OF_REPLICAS = -1; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, IntSupplier getNumOfReplicas) { @@ -116,7 +117,7 @@ public String toString() { this.getNumOfReplicas = getNumOfReplicas; // This numOfReplicas is going to be lazy initialized. - this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + this.numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS; // Start chores this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); @@ -145,7 +146,7 @@ public void onError(HRegionLocation loc) { */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; - if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + if (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } @@ -261,16 +262,16 @@ private void cleanupReplicaReplicaStaleCache() { private int refreshCatalogReplicaCount() { int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); LOG.debug("Refreshed replica count {}", newNumOfReplicas); - // If the returned number of replicas is -1, it is caused by failure to fetch the - // replica count. Do not update the numOfReplicas in this case. - if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { - LOG.error("Failed to fetch Table {}'s region replica count", tableName); - return this.numOfReplicas; + if (newNumOfReplicas == 1) { + LOG.warn("Table {}'s region replica count is 1, maybe a misconfiguration or failure to " + + "fetch the replica count", tableName); } - int cachedNumOfReplicas = this.numOfReplicas; + + // If the returned number of replicas is 1, it is mostly caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - (cachedNumOfReplicas != newNumOfReplicas)) { + ((cachedNumOfReplicas != newNumOfReplicas) && (newNumOfReplicas != 1))) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; From 350cc5c0dd84008cfd5cd168b793bcf12fa9c032 Mon Sep 17 00:00:00 2001 From: huaxiangsun Date: Tue, 15 Dec 2020 21:45:39 -0800 Subject: [PATCH 752/769] HBASE-25293 Followup jira to address the client handling issue when chaning from meta replica to non-meta-replica at the server side. 
(#2768) Signed-off-by: stack --- .../client/AsyncNonMetaRegionLocator.java | 2 +- .../CatalogReplicaLoadBalanceSelector.java | 2 ++ ...talogReplicaLoadBalanceSimpleSelector.java | 19 +++++++++---------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 2c2520f8bd12..1c686aca8b76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -211,7 +211,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = 1; + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index c3ce868757f1..27be88a9def2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -28,6 +28,8 @@ @InterfaceAudience.Private interface CatalogReplicaLoadBalanceSelector { + int UNINITIALIZED_NUM_OF_REPLICAS = -1; + /** * This method is called when input location is stale, i.e, when clients run into * org.apache.hadoop.hbase.NotServingRegionException. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index bc8264050149..01996b34e2ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -108,7 +108,6 @@ public String toString() { private final TableName tableName; private final IntSupplier getNumOfReplicas; private volatile boolean isStopped = false; - private final static int UNINITIALIZED_NUM_OF_REPLICAS = -1; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, IntSupplier getNumOfReplicas) { @@ -117,7 +116,7 @@ public String toString() { this.getNumOfReplicas = getNumOfReplicas; // This numOfReplicas is going to be lazy initialized. 
- this.numOfReplicas = UNINITIALIZED_NUM_OF_REPLICAS; + this.numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; // Start chores this.conn.getChoreService().scheduleChore(getCacheCleanupChore(this)); this.conn.getChoreService().scheduleChore(getRefreshReplicaCountChore(this)); @@ -146,7 +145,7 @@ public void onError(HRegionLocation loc) { */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; - if (cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) { + if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } @@ -262,16 +261,16 @@ private void cleanupReplicaReplicaStaleCache() { private int refreshCatalogReplicaCount() { int newNumOfReplicas = this.getNumOfReplicas.getAsInt(); LOG.debug("Refreshed replica count {}", newNumOfReplicas); - if (newNumOfReplicas == 1) { - LOG.warn("Table {}'s region replica count is 1, maybe a misconfiguration or failure to " - + "fetch the replica count", tableName); + // If the returned number of replicas is -1, it is caused by failure to fetch the + // replica count. Do not update the numOfReplicas in this case. + if (newNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { + LOG.error("Failed to fetch Table {}'s region replica count", tableName); + return this.numOfReplicas; } - int cachedNumOfReplicas = this.numOfReplicas; - // If the returned number of replicas is 1, it is mostly caused by failure to fetch the - // replica count. Do not update the numOfReplicas in this case. + int cachedNumOfReplicas = this.numOfReplicas; if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - ((cachedNumOfReplicas != newNumOfReplicas) && (newNumOfReplicas != 1))) { + (cachedNumOfReplicas != newNumOfReplicas)) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; From d2b3f39e4bd8154f17e5e294c1b03a1b1e89b2e3 Mon Sep 17 00:00:00 2001 From: shahrs87 Date: Wed, 16 Dec 2020 18:46:21 +0530 Subject: [PATCH 753/769] HBASE-25246 Backup/Restore hbase cell tags Closes #2745 Signed-off-by: Anoop Sam John Signed-off-by: Viraj Jasani --- .../main/java/org/apache/hadoop/hbase/mapreduce/Import.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 239a12bdc688..30071fdfd809 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -511,6 +512,7 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { + List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer kv.getRowOffset(), // row offset kv.getRowLength(), // row length @@ -524,7 +526,8 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { 
KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type kv.getValueArray(), // value buffer kv.getValueOffset(), // value offset - kv.getValueLength()); // value length + kv.getValueLength(), // value length + tags.size() == 0 ? null: tags); } } return kv; From 1510eb5a1104ae729f9ec410c536112d169c215b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 19 Dec 2020 20:28:30 +0800 Subject: [PATCH 754/769] Revert "HBASE-25368 Filter out more invalid encoded name in isEncodedRegionName(byte[] regionName) (#2753)" This reverts commit c3276801256aa16a62e5cdba7a37d4e18d59e880. --- .../apache/hadoop/hbase/client/RegionInfo.java | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index b6bdd0103de8..d7460e9d15ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -363,23 +363,7 @@ static byte[] getStartKey(final byte[] regionName) throws IOException { @InterfaceAudience.Private // For use by internals only. public static boolean isEncodedRegionName(byte[] regionName) { // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex. - if (parseRegionNameOrReturnNull(regionName) == null) { - if (regionName.length > MD5_HEX_LENGTH) { - return false; - } else if (regionName.length == MD5_HEX_LENGTH) { - return true; - } else { - String encodedName = Bytes.toString(regionName); - try { - Integer.parseInt(encodedName); - // If this is a valid integer, it could be hbase:meta's encoded region name. - return true; - } catch(NumberFormatException er) { - return false; - } - } - } - return false; + return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH; } /** From 2d3b3ace9bca2ecfb0d546b8cd48aa945c762fe3 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 4 Jan 2021 23:30:32 +0800 Subject: [PATCH 755/769] =?UTF-8?q?HBASE-25457=20Possible=20race=20in=20As?= =?UTF-8?q?yncConnectionImpl=20between=20getChoreServ=E2=80=A6=20(#2839)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/client/AsyncConnectionImpl.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 5ed5b9ed6fff..8a1ac5aac76d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -28,8 +28,6 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; import java.net.SocketAddress; import java.util.Optional; import java.util.concurrent.CompletableFuture; From d62a55180a3a81e1cc114f9e7c16e1fbc65f9915 Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Tue, 5 Jan 2021 10:21:26 +0000 Subject: [PATCH 756/769] =?UTF-8?q?HBASE-24813=20ReplicationSource=20shoul?= =?UTF-8?q?d=20clear=20buffer=20usage=20on=20Replicatio=E2=80=A6=20(#2546)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Ankit Singhal --- 
.../ReplicationSourceWALReader.java | 3 +- .../regionserver/TestReplicationSource.java | 50 +++++++++++++++++-- 2 files changed, 49 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index 9b93bf5f9de2..be262a6d9504 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -60,7 +60,8 @@ class ReplicationSourceWALReader extends Thread { private final WALEntryFilter filter; private final ReplicationSource source; - private final BlockingQueue entryBatchQueue; + @InterfaceAudience.Private + final BlockingQueue entryBatchQueue; // max (heap) size of each batch - multiply by number of batches in queue to get total private final long replicationBatchSizeCapacity; // max count of each batch - multiply by number of batches in queue to get total diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index ce38e5409577..50537b5e1be2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -22,7 +22,10 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; +import java.util.ArrayList; import java.util.OptionalLong; import java.util.UUID; import java.util.concurrent.ExecutorService; @@ -271,6 +274,47 @@ public void testTerminateTimeout() throws Exception { } } + @Test + public void testTerminateClearsBuffer() throws Exception { + ReplicationSource source = new ReplicationSource(); + ReplicationSourceManager mockManager = mock(ReplicationSourceManager.class); + MetricsReplicationGlobalSourceSource mockMetrics = + mock(MetricsReplicationGlobalSourceSource.class); + AtomicLong buffer = new AtomicLong(); + Mockito.when(mockManager.getTotalBufferUsed()).thenReturn(buffer); + Mockito.when(mockManager.getGlobalMetrics()).thenReturn(mockMetrics); + ReplicationPeer mockPeer = mock(ReplicationPeer.class); + Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L); + Configuration testConf = HBaseConfiguration.create(); + source.init(testConf, null, mockManager, null, mockPeer, null, + "testPeer", null, p -> OptionalLong.empty(), mock(MetricsSource.class)); + ReplicationSourceWALReader reader = new ReplicationSourceWALReader(null, + conf, null, 0, null, source); + ReplicationSourceShipper shipper = + new ReplicationSourceShipper(conf, null, null, source); + shipper.entryReader = reader; + source.workerThreads.put("testPeer", shipper); + WALEntryBatch batch = new WALEntryBatch(10, logDir); + WAL.Entry mockEntry = mock(WAL.Entry.class); + WALEdit mockEdit = mock(WALEdit.class); + WALKeyImpl mockKey = mock(WALKeyImpl.class); + when(mockEntry.getEdit()).thenReturn(mockEdit); + when(mockEdit.isEmpty()).thenReturn(false); + when(mockEntry.getKey()).thenReturn(mockKey); + 
when(mockKey.estimatedSerializedSizeOf()).thenReturn(1000L); + when(mockEdit.heapSize()).thenReturn(10000L); + when(mockEdit.size()).thenReturn(0); + ArrayList cells = new ArrayList<>(); + KeyValue kv = new KeyValue(Bytes.toBytes("0001"), Bytes.toBytes("f"), + Bytes.toBytes("1"), Bytes.toBytes("v1")); + cells.add(kv); + when(mockEdit.getCells()).thenReturn(cells); + reader.addEntryToBatch(batch, mockEntry); + reader.entryBatchQueue.put(batch); + source.terminate("test"); + assertEquals(0, source.getSourceManager().getTotalBufferUsed().get()); + } + /** * Tests that recovered queues are preserved on a regionserver shutdown. * See HBASE-18192 @@ -440,12 +484,12 @@ public void testRecoveredReplicationSourceShipperGetPosition() throws Exception ServerName deadServer = ServerName.valueOf("www.deadServer.com", 12006, 1524679704419L); PriorityBlockingQueue queue = new PriorityBlockingQueue<>(); queue.put(new Path("/www/html/test")); - RecoveredReplicationSource source = Mockito.mock(RecoveredReplicationSource.class); - Server server = Mockito.mock(Server.class); + RecoveredReplicationSource source = mock(RecoveredReplicationSource.class); + Server server = mock(Server.class); Mockito.when(server.getServerName()).thenReturn(serverName); Mockito.when(source.getServer()).thenReturn(server); Mockito.when(source.getServerWALsBelongTo()).thenReturn(deadServer); - ReplicationQueueStorage storage = Mockito.mock(ReplicationQueueStorage.class); + ReplicationQueueStorage storage = mock(ReplicationQueueStorage.class); Mockito.when(storage.getWALPosition(Mockito.eq(serverName), Mockito.any(), Mockito.any())) .thenReturn(1001L); Mockito.when(storage.getWALPosition(Mockito.eq(deadServer), Mockito.any(), Mockito.any())) From 8c2f8879bc62223a3f212eb93118f478d9f17870 Mon Sep 17 00:00:00 2001 From: Anjan Das Date: Thu, 7 Jan 2021 15:31:50 +0530 Subject: [PATCH 757/769] HBASE-25445: Use WAL FS instead of master FS in SplitWALManager (#2844) Signed-off-by: Pankaj Signed-off-by: ramkrish86 Signed-off-by: Viraj Jasani --- .../hbase/master/TestSplitWALManager.java | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index e1f318869bab..fc5c68a67cff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -31,6 +31,14 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure; +import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -90,6 +98,58 @@ public void teardown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } + @Test + public void testWALArchiveWithDifferentWalAndRootFS() throws Exception{ + HBaseTestingUtility test_util_2 = new HBaseTestingUtility(); + 
+    Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir");
+    test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString());
+    CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir);
+    test_util_2.startMiniCluster(3);
+    HMaster master2 = test_util_2.getHBaseCluster().getMaster();
+    LOG.info("The Master FS is pointing to: " + master2.getMasterFileSystem()
+      .getFileSystem().getUri());
+    LOG.info("The WAL FS is pointing to: " + master2.getMasterFileSystem()
+      .getWALFileSystem().getUri());
+    Table table = test_util_2.createTable(TABLE_NAME, FAMILY);
+    test_util_2.waitTableAvailable(TABLE_NAME);
+    Admin admin = test_util_2.getAdmin();
+    MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster()
+      .getMasterProcedureExecutor().getEnvironment();
+    final ProcedureExecutor<MasterProcedureEnv> executor = test_util_2.getMiniHBaseCluster()
+      .getMaster().getMasterProcedureExecutor();
+    List<RegionInfo> regionInfos = admin.getRegions(TABLE_NAME);
+    SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure(
+      env, regionInfos.get(0), Bytes.toBytes("row5"));
+    // Populate some rows in the table
+    LOG.info("Beginning put data to the table: " + TABLE_NAME.toString());
+    int rowCount = 5;
+    for (int i = 0; i < rowCount; i++) {
+      byte[] row = Bytes.toBytes("row" + i);
+      Put put = new Put(row);
+      put.addColumn(FAMILY, FAMILY, FAMILY);
+      table.put(put);
+    }
+    executor.submitProcedure(splitProcedure);
+    LOG.info("Submitted SplitProcedure.");
+    test_util_2.waitFor(30000, () -> executor.getProcedures().stream()
+      .filter(p -> p instanceof TransitRegionStateProcedure)
+      .map(p -> (TransitRegionStateProcedure) p)
+      .anyMatch(p -> TABLE_NAME.equals(p.getTableName())));
+    test_util_2.getMiniHBaseCluster().killRegionServer(
+      test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName());
+    test_util_2.getMiniHBaseCluster().startRegionServer();
+    test_util_2.waitUntilNoRegionsInTransition();
+    Scan scan = new Scan();
+    ResultScanner results = table.getScanner(scan);
+    int scanRowCount = 0;
+    while (results.next() != null) {
+      scanRowCount++;
+    }
+    Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount +
+      " were expected.", rowCount, scanRowCount);
+    test_util_2.shutdownMiniCluster();
+  }
+
   @Test
   public void testAcquireAndRelease() throws Exception {
     List<FakeServerProcedure> testProcedures = new ArrayList<>();

From 583204af1b911c2174f55f287ceb3a36eeee73a9 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Fri, 8 Jan 2021 12:38:45 -0800
Subject: [PATCH 758/769] HBASE-25487 [create-release] changes.md update broken
 (#2864)

Signed-off-by: Huaxiang Sun
---
 dev-support/create-release/release-util.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh
index 0e9eea174979..d907253dffe6 100755
--- a/dev-support/create-release/release-util.sh
+++ b/dev-support/create-release/release-util.sh
@@ -512,7 +512,7 @@ function get_jira_name {
   if [[ -z "$jira_name" ]]; then
     error "Sorry, can't determine the Jira name for project $project"
   fi
-  log "$jira_name"
+  echo "$jira_name"
 }
 
 # Update the CHANGES.md

From 40e7fba429b7b6c47d6fe006db62b9e69eff5e25 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=94=B3=E8=83=9C=E5=88=A9?= <48829688+shenshengli@users.noreply.github.com>
Date: Fri, 8 Jan 2021 14:10:30 -0500
Subject: [PATCH 759/769] HBASE-25449 'dfs.client.read.shortcircuit' should not
 be set in hbase-default.xml

Signed-off-by: Josh Elser
---
 .../hadoop/hbase/TestHBaseConfiguration.java  | 17 ++++++++
 .../src/test/resources/hdfs-default.xml       | 42 +++++++++++++++++++
 2 files changed, 59 insertions(+)
 create mode 100644 hbase-common/src/test/resources/hdfs-default.xml

diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index ffa94ba2d59f..a20f34718edb 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -115,6 +115,23 @@ public void testSecurityConfCaseInsensitive() {
     conf.set("hbase.security.authentication", "KERBeros");
     Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
+
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    Configuration.addDefaultResource("hdfs-default.xml");
+    assertEquals("hdfs-default.xml",
+      conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+    assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+    assertNull(conf.get("dfs.domain.socket.path"));
+    Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+    assertEquals("hdfs-scr-enabled.xml",
+      conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+    assertEquals("hdfs-scr-enabled.xml",
+      conf.getPropertySources("dfs.domain.socket.path")[0]);
+    assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+    assertEquals("/var/lib/hadoop-hdfs/dn_socket", conf.get("dfs.domain.socket.path"));
+  }
 
   @Test
   public void testGetConfigOfShortcircuitRead() throws Exception {
diff --git a/hbase-common/src/test/resources/hdfs-default.xml b/hbase-common/src/test/resources/hdfs-default.xml
new file mode 100644
index 000000000000..fdb3c36edc87
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-default.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value></value>
+    <description>
+      Optional. This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>

From b05088e793d5598d80cc06154aa20f29ac04f23f Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Tue, 12 Jan 2021 16:08:54 +0800
Subject: [PATCH 760/769] Revert "HBASE-25449 'dfs.client.read.shortcircuit'
 should not be set in hbase-default.xml"

This reverts commit 49aba571813f649a2ff4482a2209ee9910cc72c3.
---
 .../src/main/resources/hbase-default.xml      |  4 +-
 .../hadoop/hbase/TestHBaseConfiguration.java  | 17 --------
 .../src/test/resources/hdfs-default.xml       | 42 -------------------
 .../src/test/resources/hdfs-scr-enabled.xml   | 42 -------------------
 4 files changed, 2 insertions(+), 103 deletions(-)
 delete mode 100644 hbase-common/src/test/resources/hdfs-default.xml
 delete mode 100644 hbase-common/src/test/resources/hdfs-scr-enabled.xml

diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 20f3881edb2c..9092dd147198 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>dfs.client.read.shortcircuit</name>
-    <value></value>
+    <value>false</value>
    <description>
      If set to true, this configuration parameter enables short-circuit local
      reads.
@@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>dfs.domain.socket.path</name>
-    <value></value>
+    <value>none</value>
    <description>
      This is a path to a UNIX domain socket that will be used for
      communication between the DataNode and local HDFS clients, if
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index a20f34718edb..ffa94ba2d59f 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -115,23 +115,6 @@ public void testSecurityConfCaseInsensitive() {
     conf.set("hbase.security.authentication", "KERBeros");
     Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
-
-  @Test
-  public void testGetConfigOfShortcircuitRead() throws Exception {
-    Configuration conf = HBaseConfiguration.create();
-    Configuration.addDefaultResource("hdfs-default.xml");
-    assertEquals("hdfs-default.xml",
-      conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
-    assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
-    assertNull(conf.get("dfs.domain.socket.path"));
-    Configuration.addDefaultResource("hdfs-scr-enabled.xml");
-    assertEquals("hdfs-scr-enabled.xml",
-      conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
-    assertEquals("hdfs-scr-enabled.xml",
-      conf.getPropertySources("dfs.domain.socket.path")[0]);
-    assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
-    assertEquals("/var/lib/hadoop-hdfs/dn_socket", conf.get("dfs.domain.socket.path"));
-  }
 
   @Test
   public void testGetConfigOfShortcircuitRead() throws Exception {
diff --git a/hbase-common/src/test/resources/hdfs-default.xml b/hbase-common/src/test/resources/hdfs-default.xml
deleted file mode 100644
index fdb3c36edc87..000000000000
--- a/hbase-common/src/test/resources/hdfs-default.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>false</value>
-    <description>
-      If set to true, this configuration parameter enables short-circuit local
-      reads.
-    </description>
-  </property>
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value></value>
-    <description>
-      Optional. This is a path to a UNIX domain socket that will be used for
-      communication between the DataNode and local HDFS clients.
-      If the string "_PORT" is present in this path, it will be replaced by the
-      TCP port of the DataNode.
-    </description>
-  </property>
-</configuration>
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
deleted file mode 100644
index 8594494782c5..000000000000
--- a/hbase-common/src/test/resources/hdfs-scr-enabled.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>
-      If set to true, this configuration parameter enables short-circuit local
-      reads.
-    </description>
-  </property>
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>
-      Optional. This is a path to a UNIX domain socket that will be used for
-      communication between the DataNode and local HDFS clients.
-      If the string "_PORT" is present in this path, it will be replaced by the
-      TCP port of the DataNode.
-    </description>
-  </property>
-</configuration>

From b2daed5d52c6adbca3429e1bc6d48db7e08f5395 Mon Sep 17 00:00:00 2001
From: Viraj Jasani
Date: Wed, 13 Jan 2021 05:01:26 +0530
Subject: [PATCH 761/769] HBASE-25211 : Rack awareness in RegionMover (#2795)

Signed-off-by: Andrew Purtell
---
 .../src/main/java/org/apache/hadoop/hbase/util/RegionMover.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index 778d66da63d8..fcc3acabcd27 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -87,6 +87,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable {
   public static final int DEFAULT_MOVE_RETRIES_MAX = 5;
   public static final int DEFAULT_MOVE_WAIT_MAX = 60;
   public static final int DEFAULT_SERVERSTART_WAIT_MAX = 180;
+  private final RackManager rackManager;
 
   private static final Logger LOG = LoggerFactory.getLogger(RegionMover.class);
 
@@ -125,6 +126,7 @@ private RegionMover(RegionMoverBuilder builder) throws IOException {
   }
 
   private RegionMover() {
+    rackManager = new RackManager(conf);
   }
 
   @Override

From f65935adf6cdb6d9e142094484c6e9a98b3080e6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=94=B3=E8=83=9C=E5=88=A9?= <48829688+shenshengli@users.noreply.github.com>
Date: Tue, 12 Jan 2021 09:06:13 -0500
Subject: [PATCH 762/769] HBASE-25449 'dfs.client.read.shortcircuit' should not
 be set in hbase-default.xml

Revert of the revert -- re-applying HBASE-25449 with a change of renaming the
test hdfs XML configuration file as it was adversely affecting tests using
MiniDFS

This reverts commit c218e576fe54df208e277365f1ac24f993f2a4b1.

Co-authored-by: Josh Elser
Signed-off-by: Peter Somogyi
Signed-off-by: Michael Stack
Signed-off-by: Duo Zhang
---
 .../src/main/resources/hbase-default.xml      |  4 +-
 .../src/test/resources/hdfs-scr-enabled.xml   | 42 +++++++++++++++++++
 2 files changed, 44 insertions(+), 2 deletions(-)
 create mode 100644 hbase-common/src/test/resources/hdfs-scr-enabled.xml

diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 9092dd147198..20f3881edb2c 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>dfs.client.read.shortcircuit</name>
-    <value>false</value>
+    <value></value>
    <description>
      If set to true, this configuration parameter enables short-circuit local
      reads.
@@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>dfs.domain.socket.path</name>
-    <value>none</value>
+    <value></value>
    <description>
      This is a path to a UNIX domain socket that will be used for
      communication between the DataNode and local HDFS clients, if
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000000000000..8594494782c5
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      Optional. This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>

From 77d01ccddaf609ca2693e23f645f0c98e034b3e5 Mon Sep 17 00:00:00 2001
From: huaxiangsun
Date: Wed, 20 Jan 2021 09:04:50 -0800
Subject: [PATCH 763/769] =?UTF-8?q?HBASE-25368=20Filter=20out=20more=20inv?=
 =?UTF-8?q?alid=20encoded=20name=20in=20isEncodedRegionNa=E2=80=A6=20(#286?=
 =?UTF-8?q?8)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

HBASE-25368 Filter out more invalid encoded name in isEncodedRegionName(byte[] regionName)

Signed-off-by: Duo Zhang
---
 .../apache/hadoop/hbase/client/RegionInfo.java | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index d7460e9d15ef..b6bdd0103de8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -363,7 +363,23 @@ static byte[] getStartKey(final byte[] regionName) throws IOException {
   @InterfaceAudience.Private // For use by internals only.
   public static boolean isEncodedRegionName(byte[] regionName) {
     // If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex.
-    return parseRegionNameOrReturnNull(regionName) == null && regionName.length <= MD5_HEX_LENGTH;
+    if (parseRegionNameOrReturnNull(regionName) == null) {
+      if (regionName.length > MD5_HEX_LENGTH) {
+        return false;
+      } else if (regionName.length == MD5_HEX_LENGTH) {
+        return true;
+      } else {
+        String encodedName = Bytes.toString(regionName);
+        try {
+          Integer.parseInt(encodedName);
+          // If this is a valid integer, it could be hbase:meta's encoded region name.
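+          // (hbase:meta's encoded name is its region id, e.g. "1588230740",
+          // which is shorter than an MD5 hex string and parses as an integer.)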
+          return true;
+        } catch (NumberFormatException er) {
+          return false;
+        }
+      }
+    }
+    return false;
   }
 
   /**

From 38a8b16c5e2655f27e2deeb9dd3ba7f20db29654 Mon Sep 17 00:00:00 2001
From: Baiqiang Zhao
Date: Fri, 5 Feb 2021 16:31:53 +0800
Subject: [PATCH 764/769] HBASE-25554 NPE when init RegionMover (#2927)

Signed-off-by: Viraj Jasani
---
 .../src/main/java/org/apache/hadoop/hbase/util/RegionMover.java | 2 --
 1 file changed, 2 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index fcc3acabcd27..778d66da63d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -87,7 +87,6 @@ public class RegionMover extends AbstractHBaseTool implements Closeable {
   public static final int DEFAULT_MOVE_RETRIES_MAX = 5;
   public static final int DEFAULT_MOVE_WAIT_MAX = 60;
   public static final int DEFAULT_SERVERSTART_WAIT_MAX = 180;
-  private final RackManager rackManager;
 
   private static final Logger LOG = LoggerFactory.getLogger(RegionMover.class);
 
@@ -126,7 +125,6 @@ private RegionMover(RegionMoverBuilder builder) throws IOException {
   }
 
   private RegionMover() {
-    rackManager = new RackManager(conf);
   }
 
   @Override

From 06df04d323ac81127a366db500803d5a648d3fa5 Mon Sep 17 00:00:00 2001
From: pustota2009 <61382543+pustota2009@users.noreply.github.com>
Date: Mon, 4 May 2020 19:04:53 +0300
Subject: [PATCH 765/769] Update TestLruBlockCache.java

added unit test
---
 .../hbase/io/hfile/TestLruBlockCache.java     | 47 +++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index afaf85f5b2a1..d139a9588b24 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -1029,4 +1029,51 @@ public void testMultiThreadGetAndEvictBlock() throws Exception {
       false, 1024);
     testMultiThreadGetAndEvictBlockInternal(cache);
   }
+
+  public void testSkipCacheDataBlocksInternal(int percentOfCachedBlocks) throws Exception {
+    long maxSize = 100000;
+    int numBlocks = 100;
+    final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
+    assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
+
+    final LruBlockCache cache =
+      new LruBlockCache(maxSize, blockSize, true, (int) Math.ceil(1.2 * maxSize / blockSize),
+        LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min
+        0.99f, // acceptable
+        0.33f, // single
+        0.33f, // multi
+        0.34f, // memory
+        1.2f, // limit
+        false,
+        maxSize,
+        percentOfCachedBlocks,
+        0,
+        1);
+
+    EvictionThread evictionThread = cache.getEvictionThread();
+    assertTrue(evictionThread != null);
+    while (!evictionThread.isEnteringRun()) {
+      Thread.sleep(1);
+    }
+
+    final String hfileName = "hfile";
+    for (int blockIndex = 0; blockIndex <= numBlocks * 5; ++blockIndex) {
+      CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex);
+      cache.cacheBlock(block.cacheKey, block, false);
+      Thread.sleep(1);
+    }
+
+    // Check if all offset of cached blocks less
+    // It means some of blocks were not put into BlockCache
+    for (BlockCacheKey key : cache.getMapForTests().keySet())
+      Assert.assertTrue(key.getOffset() % 100 <
+        percentOfCachedBlocks);
+
+  }
+
+  @Test
+  public void testSkipCacheDataBlocks() throws Exception {
+    for (int percentOfCachedBlocks = 25; percentOfCachedBlocks <= 100; percentOfCachedBlocks += 25) {
+      testSkipCacheDataBlocksInternal(percentOfCachedBlocks);
+    }
+  }
 }

From 47f42f5ac5f85b1dc55004160581adae747925dc Mon Sep 17 00:00:00 2001
From: pustota2009 <61382543+pustota2009@users.noreply.github.com>
Date: Mon, 4 May 2020 19:06:18 +0300
Subject: [PATCH 766/769] Update TestLruBlockCache.java
---
 .../java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index d139a9588b24..058d4774c582 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -1067,7 +1067,6 @@ public void testSkipCacheDataBlocksInternal(int percentOfCachedBlocks) throws Exc
     // It means some of blocks were not put into BlockCache
     for (BlockCacheKey key : cache.getMapForTests().keySet())
       Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks);
-
   }
 
   @Test

From a74579b8da96fc33818b7efa0cb89d36b94550b2 Mon Sep 17 00:00:00 2001
From: pustota2009 <61382543+pustota2009@users.noreply.github.com>
Date: Mon, 4 May 2020 20:11:36 +0300
Subject: [PATCH 767/769] Update TestLruBlockCache.java

fix codestyle
---
 .../org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index 058d4774c582..02c8f22c9bb2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -1065,8 +1065,9 @@ public void testSkipCacheDataBlocksInternal(int percentOfCachedBlocks) throws Exc
     // Check if all offset of cached blocks less
     // It means some of blocks were not put into BlockCache
-    for (BlockCacheKey key : cache.getMapForTests().keySet())
+    for (BlockCacheKey key : cache.getMapForTests().keySet()) {
       Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks);
+    }
   }
 
   @Test

From 7adff0f47435949fc74f54a72c5b43564b210dc9 Mon Sep 17 00:00:00 2001
From: pustota2009 <61382543+pustota2009@users.noreply.github.com>
Date: Tue, 5 May 2020 09:09:12 +0300
Subject: [PATCH 768/769] Update TestLruBlockCache.java

added comments
---
 .../org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index 02c8f22c9bb2..4b272f9e27aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -1063,7 +1063,7 @@ public void testSkipCacheDataBlocksInternal(int percentOfCachedBlocks) throws Exc
       Thread.sleep(1);
     }
 
-    // Check if all offset of cached blocks less
+    // Check that all offsets (last two digits) of cached blocks are less than the percent.
     // It means some of blocks were not put into BlockCache
     for (BlockCacheKey key : cache.getMapForTests().keySet()) {
       Assert.assertTrue(key.getOffset() % 100 < percentOfCachedBlocks);

From ee910f2794880b707c9adcaef164aa46d8649639 Mon Sep 17 00:00:00 2001
From: pustota2009
Date: Sun, 7 Feb 2021 18:41:19 +0300
Subject: [PATCH 769/769] Added AdaptiveLruBlockCache + rebase
---
 .../apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java
index 29b02dcde708..329d80b67a95 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AdaptiveLruBlockCache.java
@@ -300,8 +300,8 @@ public class AdaptiveLruBlockCache implements FirstLevelBlockCache {
   private boolean forceInMemory;
 
   /**
-   * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an
-   * external cache as L2.
+   * Where to send victims (blocks evicted/missing from the cache).
+   * This is used only when we use an external cache as L2.
    * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache
    */
   private transient BlockCache victimHandler = null;
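The admission behavior that testSkipCacheDataBlocks pins down can be shown in isolation. The snippet below is a minimal, standalone sketch of the offset-modulo check the tests assert (a DATA block is cached only when its offset modulo 100 is below the configured percent); the class and method names here are illustrative placeholders, not HBase API.

    // Illustrative sketch, not HBase code: admit a DATA block only when the last
    // two decimal digits of its file offset fall below the configured percent.
    public final class OffsetPercentAdmission {
      private final int cacheDataBlockPercent; // clamped to [0, 100]; 100 caches everything

      public OffsetPercentAdmission(int percent) {
        // Mirror the bounds check applied to the configured percent in the patch series.
        this.cacheDataBlockPercent = Math.max(0, Math.min(100, percent));
      }

      public boolean shouldCacheDataBlock(long offset) {
        return cacheDataBlockPercent == 100 || offset % 100 < cacheDataBlockPercent;
      }

      public static void main(String[] args) {
        OffsetPercentAdmission admission = new OffsetPercentAdmission(25);
        System.out.println(admission.shouldCacheDataBlock(1024)); // 1024 % 100 = 24 -> true
        System.out.println(admission.shouldCacheDataBlock(1050)); // 1050 % 100 = 50 -> false
      }
    }

In the unit test above the block index serves as the offset, so with a percent of 25 only keys whose offset ends in 00 through 24 can appear in the cache map, which is exactly what the final assertion loop in testSkipCacheDataBlocksInternal verifies.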