From 910dc1818f94fc93396cb02392c555183e742bdc Mon Sep 17 00:00:00 2001
From: niuyulin
Date: Fri, 5 Mar 2021 17:21:50 +0800
Subject: [PATCH] HBASE-25637 Rename method completeCompaction to refreshStoreSizeAndTotalBytes

---
 .../apache/hadoop/hbase/regionserver/HStore.java | 16 +++++++---------
 .../org/apache/hadoop/hbase/TestIOFencing.java   |  6 ++----
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 99880efece73..a2a8f9db31bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -296,7 +296,7 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
     List<HStoreFile> hStoreFiles = loadStoreFiles(warmup);
     // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read
     // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and
-    // update the storeSize in the completeCompaction(..) finally (just like compaction) , so
+    // update the storeSize in the refreshStoreSizeAndTotalBytes() finally (just like compaction) , so
     // no need calculate the storeSize twice.
     this.storeSize.addAndGet(getStorefilesSize(hStoreFiles, sf -> true));
     this.totalUncompressedBytes.addAndGet(getTotalUncompressedBytes(hStoreFiles));
@@ -713,7 +713,7 @@ private void refreshStoreFilesInternal(Collection<StoreFileInfo> newFiles) throw
       region.getMVCC().advanceTo(this.getMaxSequenceId().getAsLong());
     }
 
-    completeCompaction(toBeRemovedStoreFiles);
+    refreshStoreSizeAndTotalBytes();
   }
 
   protected HStoreFile createStoreFileAndReader(final Path p) throws IOException {
@@ -1543,7 +1543,7 @@ protected List<HStoreFile> doCompaction(CompactionRequestImpl cr,
     long outputBytes = getTotalSize(sfs);
 
     // At this point the store will use new files for all new scanners.
-    completeCompaction(filesToCompact); // update store size.
+    refreshStoreSizeAndTotalBytes(); // update store size.
 
     long now = EnvironmentEdgeManager.currentTime();
     if (region.getRegionServerServices() != null
@@ -1769,7 +1769,7 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick
       LOG.info("Replaying compaction marker, replacing input files: " + inputStoreFiles
         + " with output files : " + outputStoreFiles);
       this.replaceStoreFiles(inputStoreFiles, outputStoreFiles);
-      this.completeCompaction(inputStoreFiles);
+      this.refreshStoreSizeAndTotalBytes();
     }
   }
 
@@ -1822,7 +1822,7 @@ public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOExcepti
           this.getCoprocessorHost().postCompact(this, sf, null, null, null);
         }
         replaceStoreFiles(filesToCompact, Collections.singletonList(sf));
-        completeCompaction(filesToCompact);
+        refreshStoreSizeAndTotalBytes();
       }
     } finally {
       synchronized (filesCompacting) {
@@ -2010,7 +2010,7 @@ private void removeUnneededFiles() throws IOException {
     Collection<HStoreFile> newFiles = Collections.emptyList(); // No new files.
     writeCompactionWalRecord(delSfs, newFiles);
     replaceStoreFiles(delSfs, newFiles);
-    completeCompaction(delSfs);
+    refreshStoreSizeAndTotalBytes();
     LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
       + this + "; total size is "
       + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));
@@ -2052,10 +2052,8 @@ private void validateStoreFile(Path path) throws IOException {
 
   /**
    * Update counts.
-   * @param compactedFiles list of files that were compacted
    */
-  protected void completeCompaction(Collection<HStoreFile> compactedFiles)
-  // Rename this method! TODO.
+  protected void refreshStoreSizeAndTotalBytes()
     throws IOException {
     this.storeSize.set(0L);
     this.totalUncompressedBytes.set(0L);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index 52b6dc372ce3..d363181d0343 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -21,7 +21,6 @@
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -39,7 +38,6 @@
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -211,14 +209,14 @@ protected BlockCompactionsInCompletionHStore(HRegion region, ColumnFamilyDescrip
     }
 
     @Override
-    protected void completeCompaction(Collection<HStoreFile> compactedFiles) throws IOException {
+    protected void refreshStoreSizeAndTotalBytes() throws IOException {
       try {
         r.compactionsWaiting.countDown();
         r.compactionsBlocked.await();
       } catch (InterruptedException ex) {
         throw new IOException(ex);
       }
-      super.completeCompaction(compactedFiles);
+      super.refreshStoreSizeAndTotalBytes();
     }
   }
 