From 69f4aa2a00accbe53341300c6c46de724f8436e8 Mon Sep 17 00:00:00 2001 From: Abhishek Khanna Date: Mon, 3 Aug 2020 23:47:07 -0700 Subject: [PATCH 01/10] HBASE-25249 Adding HStoreContext Adding HStoreContext which contains the metadata about the HStore. This meta data can be used across the HFileWriter/Readers and other HStore consumers without the need of passing around the complete store and exposing its internals. --- .../hbase/mapreduce/HFileOutputFormat2.java | 5 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 5 +- .../hadoop/hbase/regionserver/HMobStore.java | 31 +-- .../hadoop/hbase/regionserver/HStore.java | 244 +++++++++--------- .../hbase/regionserver/HStoreContext.java | 183 +++++++++++++ .../hadoop/hbase/regionserver/StoreUtils.java | 26 ++ .../hadoop/hbase/tool/BulkLoadHFilesTool.java | 5 +- .../wal/BoundedRecoveredHFilesOutputSink.java | 6 +- .../apache/hadoop/hbase/io/TestHeapSize.java | 3 +- .../hbase/regionserver/TestHRegion.java | 2 +- .../TestSecureBulkLoadManager.java | 4 +- 11 files changed, 371 insertions(+), 143 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 75b5246d2c88..ee6d5331f3f6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -369,8 +370,8 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding; encoding = encoding == null ? 
DataBlockEncoding.NONE : encoding; HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression) - .withDataBlockEncoding(encoding).withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) .withColumnFamily(family).withTableName(tableName); if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index acc8f74a501b..4ef1b9019bb2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -540,8 +541,8 @@ public static StoreFileWriter createWriter(Configuration conf, FileSystem fs, Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext, boolean isCompaction) throws IOException { return createWriter(conf, fs, family, new Path(basePath, mobFileName.getFileName()), - maxKeyCount, compression, cacheConfig, cryptoContext, HStore.getChecksumType(conf), - HStore.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction); + maxKeyCount, compression, cacheConfig, cryptoContext, StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 5960b8030900..6d1d2bbf94b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -93,7 +93,6 @@ public class HMobStore extends HStore { private AtomicLong mobFlushedCellsSize = new AtomicLong(); private AtomicLong mobScanCellsCount = new AtomicLong(); private AtomicLong mobScanCellsSize = new AtomicLong(); - private ColumnFamilyDescriptor family; private Map> map = new ConcurrentHashMap<>(); private final IdLock keyLock = new IdLock(); // When we add a MOB reference cell to the HFile, we will add 2 tags along with it @@ -107,16 +106,15 @@ public class HMobStore extends HStore { public HMobStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { super(region, family, confParam, warmup); - this.family = family; this.mobFileCache = region.getMobFileCache(); this.homePath = MobUtils.getMobHome(conf); this.mobFamilyPath = MobUtils.getMobFamilyPath(conf, this.getTableName(), - family.getNameAsString()); + getColumnFamilyName()); List locations = new ArrayList<>(2); locations.add(mobFamilyPath); TableName tn = region.getTableDescriptor().getTableName(); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn) - 
.getEncodedName(), family.getNameAsString())); + .getEncodedName(), getColumnFamilyName())); map.put(tn, locations); List tags = new ArrayList<>(2); tags.add(MobConstants.MOB_REF_TAG); @@ -209,7 +207,7 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey Compression.Algorithm compression, byte[] startKey, boolean isCompaction) throws IOException { MobFileName mobFileName = MobFileName.create(startKey, date, UUID.randomUUID() - .toString().replaceAll("-", ""), region.getRegionInfo().getEncodedName()); + .toString().replaceAll("-", ""), getHRegion().getRegionInfo().getEncodedName()); return createWriterInTmp(mobFileName, basePath, maxKeyCount, compression, isCompaction); } @@ -226,9 +224,10 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath, long maxKeyCount, Compression.Algorithm compression, boolean isCompaction) throws IOException { - return MobUtils.createWriter(conf, region.getFilesystem(), family, - new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, cacheConf, - cryptoContext, checksumType, bytesPerChecksum, blocksize, BloomType.NONE, isCompaction); + return MobUtils.createWriter(conf, getFileSystem(), getColumnFamilyDescriptor(), + new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, getCacheConfig(), + getEncryptionContext(), StoreUtils.getChecksumType(conf), StoreUtils.getBytesPerChecksum(conf), + blocksize, BloomType.NONE, isCompaction); } /** @@ -245,10 +244,10 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio validateMobFile(sourceFile); LOG.info(" FLUSH Renaming flushed file from {} to {}", sourceFile, dstPath); Path parent = dstPath.getParent(); - if (!region.getFilesystem().exists(parent)) { - region.getFilesystem().mkdirs(parent); + if (!getFileSystem().exists(parent)) { + getFileSystem().mkdirs(parent); } - if (!region.getFilesystem().rename(sourceFile, dstPath)) { + if (!getFileSystem().rename(sourceFile, dstPath)) { throw new IOException("Failed rename of " + sourceFile + " to " + dstPath); } } @@ -261,7 +260,7 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio private void validateMobFile(Path path) throws IOException { HStoreFile storeFile = null; try { - storeFile = new HStoreFile(region.getFilesystem(), path, conf, this.cacheConf, + storeFile = new HStoreFile(getFileSystem(), path, conf, getCacheConfig(), BloomType.NONE, isPrimaryReplicaStore()); storeFile.initReader(); } catch (IOException e) { @@ -352,9 +351,11 @@ public List getLocations(TableName tableName) throws IOException { locations = map.get(tableName); if (locations == null) { locations = new ArrayList<>(2); - locations.add(MobUtils.getMobFamilyPath(conf, tableName, family.getNameAsString())); + locations.add(MobUtils.getMobFamilyPath(conf, tableName, getColumnFamilyDescriptor() + .getNameAsString())); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tableName, - MobUtils.getMobRegionInfo(tableName).getEncodedName(), family.getNameAsString())); + MobUtils.getMobRegionInfo(tableName).getEncodedName(), getColumnFamilyDescriptor() + .getNameAsString())); map.put(tableName, locations); } } finally { @@ -388,7 +389,7 @@ private MobCell readCell(List locations, String fileName, Cell search, MobFile file = null; Path path = new Path(location, fileName); try { - file = mobFileCache.openFile(fs, path, cacheConf); + file = mobFileCache.openFile(fs, path, 
getCacheConfig()); return readPt != -1 ? file.readCell(search, cacheMobBlocks, readPt) : file.readCell(search, cacheMobBlocks); } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 3a71c230bebe..b332ccee6e0f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -97,7 +97,6 @@ import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -157,11 +156,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected final MemStore memstore; // This stores directory in the filesystem. - protected final HRegion region; - private final ColumnFamilyDescriptor family; - private final HRegionFileSystem fs; + private final HRegion region; protected Configuration conf; - protected CacheConfig cacheConf; private long lastCompactSize = 0; volatile boolean forceMajor = false; private AtomicLong storeSize = new AtomicLong(); @@ -218,13 +214,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected final int blocksize; private HFileDataBlockEncoder dataBlockEncoder; - /** Checksum configuration */ - protected ChecksumType checksumType; - protected int bytesPerChecksum; - - // Comparing KeyValues - protected final CellComparator comparator; - final StoreEngine storeEngine; private static final AtomicBoolean offPeakCompactionTracker = new AtomicBoolean(); @@ -236,7 +225,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private long blockingFileCount; private int compactionCheckMultiplier; - protected Encryption.Context cryptoContext = Encryption.Context.NONE; private AtomicLong flushedCellsCount = new AtomicLong(); private AtomicLong compactedCellsCount = new AtomicLong(); @@ -246,6 +234,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private AtomicLong compactedCellsSize = new AtomicLong(); private AtomicLong majorCompactedCellsSize = new AtomicLong(); + private HStoreContext storeContext; + /** * Constructor * @param family HColumnDescriptor for this column @@ -254,48 +244,47 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, protected HStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { - this.fs = region.getRegionFileSystem(); - - // Assemble the store's home directory and Ensure it exists. - fs.createStoreDir(family.getNameAsString()); - this.region = region; - this.family = family; // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor // CompoundConfiguration will look for keys in reverse order of addition, so we'd // add global config first, then table and cf overrides, then cf metadata. 
this.conf = new CompoundConfiguration() - .add(confParam) - .addBytesMap(region.getTableDescriptor().getValues()) - .addStringMap(family.getConfiguration()) - .addBytesMap(family.getValues()); - this.blocksize = family.getBlocksize(); + .add(confParam) + .addBytesMap(region.getTableDescriptor().getValues()) + .addStringMap(family.getConfiguration()) + .addBytesMap(family.getValues()); + + this.region = region; + this.storeContext = initializeStoreContext(family); + + // Assemble the store's home directory and Ensure it exists. + getRegionFileSystem().createStoreDir(getColumnFamilyName()); + + this.blocksize = getColumnFamilyDescriptor().getBlocksize(); // set block storage policy for store directory - String policyName = family.getStoragePolicy(); + String policyName = getColumnFamilyDescriptor().getStoragePolicy(); if (null == policyName) { policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY); } - this.fs.setStoragePolicy(family.getNameAsString(), policyName.trim()); + getRegionFileSystem().setStoragePolicy(getColumnFamilyName(), policyName.trim()); - this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding()); + this.dataBlockEncoder = new HFileDataBlockEncoderImpl(getColumnFamilyDescriptor() + .getDataBlockEncoding()); - this.comparator = region.getCellComparator(); // used by ScanQueryMatcher long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); LOG.trace("Time to purge deletes set to {}ms in {}", timeToPurgeDeletes, this); // Get TTL - long ttl = determineTTLFromFamily(family); + long ttl = determineTTLFromFamily(getColumnFamilyDescriptor()); // Why not just pass a HColumnDescriptor in here altogether? Even if have // to clone it? - scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); + scanInfo = new ScanInfo(conf, getColumnFamilyDescriptor(), ttl, timeToPurgeDeletes, + getComparator()); this.memstore = getMemstore(); this.offPeakHours = OffPeakHours.getInstance(conf); - // Setting up cache configuration for this family - createCacheConf(family); - this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false); this.blockingFileCount = @@ -308,7 +297,7 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER; } - this.storeEngine = createStoreEngine(this, this.conf, this.comparator); + this.storeEngine = createStoreEngine(this, this.conf, getComparator()); List hStoreFiles = loadStoreFiles(warmup); // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and @@ -318,10 +307,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.totalUncompressedBytes.addAndGet(getTotalUncompressedBytes(hStoreFiles)); this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles); - // Initialize checksum type from name. The names are CRC32, CRC32C, etc. 
- this.checksumType = getChecksumType(conf); - // Initialize bytes per checksum - this.bytesPerChecksum = getBytesPerChecksum(conf); flushRetriesNumber = conf.getInt( "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER); pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE); @@ -330,7 +315,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, "hbase.hstore.flush.retries.number must be > 0, not " + flushRetriesNumber); } - cryptoContext = EncryptionUtil.createEncryptionContext(conf, family); int confPrintThreshold = this.conf.getInt("hbase.region.store.parallel.put.print.threshold", 50); @@ -347,6 +331,48 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, cacheOnWriteLogged = false; } + private HStoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException { + return new HStoreContext.Builder() + .withBloomType(family.getBloomFilterType()) + .withCacheConfig(createCacheConf(family)) + .withCellComparator(region.getCellComparator()) + .withColumnFamilyDescriptor(family) + .withCompactedFilesSupplier(this::getCompactedFiles) + .withRegionFileSystem(region.getRegionFileSystem()) + .withDefaultHFileContext(getDefaultHFileContext(family)) + .withFavoredNodesSupplier(this::getFavoredNodes) + .withFamilyStoreDirectoryPath(region.getRegionFileSystem() + .getStoreDir(family.getNameAsString())) + .withRegionCoprocessorHost(region.getCoprocessorHost()) + .build(); + } + + private InetSocketAddress[] getFavoredNodes() { + InetSocketAddress[] favoredNodes = null; + if (region.getRegionServerServices() != null) { + favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion( + region.getRegionInfo().getEncodedName()); + } + return favoredNodes; + } + + private HFileContext getDefaultHFileContext(ColumnFamilyDescriptor family) throws IOException { + HFileContext hFileContext = new HFileContextBuilder() + .withCompression(HFile.DEFAULT_COMPRESSION_ALGORITHM) + .withCompressTags(family.isCompressTags()) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) + .withBlockSize(family.getBlocksize()) + .withHBaseCheckSum(true) + .withDataBlockEncoding(family.getDataBlockEncoding()) + .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family)) + .withColumnFamily(family.getName()) + .withTableName(region.getTableDescriptor().getTableName().getName()) + .withCellComparator(region.getCellComparator()) + .build(); + return hFileContext; + } + /** * @return MemStore Instance to use in this store. 
*/ @@ -358,7 +384,7 @@ private MemStore getMemstore() { inMemoryCompaction = MemoryCompactionPolicy.valueOf( conf.get("hbase.systemtables.compacting.memstore.type", "NONE")); } else { - inMemoryCompaction = family.getInMemoryCompaction(); + inMemoryCompaction = getColumnFamilyDescriptor().getInMemoryCompaction(); } if (inMemoryCompaction == null) { inMemoryCompaction = @@ -368,13 +394,13 @@ private MemStore getMemstore() { switch (inMemoryCompaction) { case NONE: ms = ReflectionUtils.newInstance(DefaultMemStore.class, - new Object[] { conf, this.comparator, + new Object[] { conf, getComparator(), this.getHRegion().getRegionServicesForStores()}); break; default: Class clz = conf.getClass(MEMSTORE_CLASS_NAME, CompactingMemStore.class, CompactingMemStore.class); - ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this, + ms = ReflectionUtils.newInstance(clz, new Object[]{conf, getComparator(), this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction}); } return ms; @@ -384,10 +410,12 @@ private MemStore getMemstore() { * Creates the cache config. * @param family The current column family. */ - protected void createCacheConf(final ColumnFamilyDescriptor family) { - this.cacheConf = new CacheConfig(conf, family, region.getBlockCache(), + protected CacheConfig createCacheConf(final ColumnFamilyDescriptor family) { + CacheConfig cacheConf = new CacheConfig(conf, family, region.getBlockCache(), region.getRegionServicesForStores().getByteBuffAllocator()); - LOG.info("Created cacheConfig: " + this.getCacheConfig() + " for " + this); + LOG.info("Created cacheConfig: {}, for column family {} of region {} ", cacheConf, + family.getNameAsString(), region.getRegionInfo().getEncodedName()); + return cacheConf; } /** @@ -400,7 +428,7 @@ protected void createCacheConf(final ColumnFamilyDescriptor family) { */ protected StoreEngine createStoreEngine(HStore store, Configuration conf, CellComparator kvComparator) throws IOException { - return StoreEngine.create(store, conf, comparator); + return StoreEngine.create(store, conf, getComparator()); } /** @@ -423,7 +451,7 @@ public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { @Override public String getColumnFamilyName() { - return this.family.getNameAsString(); + return this.storeContext.getFamily().getNameAsString(); } @Override @@ -433,11 +461,11 @@ public TableName getTableName() { @Override public FileSystem getFileSystem() { - return this.fs.getFileSystem(); + return storeContext.getRegionFileSystem().getFileSystem(); } public HRegionFileSystem getRegionFileSystem() { - return this.fs; + return storeContext.getRegionFileSystem(); } /* Implementation of StoreConfigInformation */ @@ -474,33 +502,14 @@ public long getBlockingFileCount() { } /* End implementation of StoreConfigInformation */ - /** - * Returns the configured bytesPerChecksum value. - * @param conf The configuration - * @return The bytesPerChecksum that is set in the configuration - */ - public static int getBytesPerChecksum(Configuration conf) { - return conf.getInt(HConstants.BYTES_PER_CHECKSUM, - HFile.DEFAULT_BYTES_PER_CHECKSUM); - } - - /** - * Returns the configured checksum algorithm. 
- * @param conf The configuration - * @return The checksum algorithm that is set in the configuration - */ - public static ChecksumType getChecksumType(Configuration conf) { - String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME); - if (checksumName == null) { - return ChecksumType.getDefaultChecksumType(); - } else { - return ChecksumType.nameToType(checksumName); - } - } @Override public ColumnFamilyDescriptor getColumnFamilyDescriptor() { - return this.family; + return this.storeContext.getFamily(); + } + + public Encryption.Context getEncryptionContext() { + return storeContext.getDefaultFileContext().getEncryptionContext(); } @Override @@ -559,7 +568,7 @@ void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) { * from the given directory. */ private List loadStoreFiles(boolean warmup) throws IOException { - Collection files = fs.getStoreFiles(getColumnFamilyName()); + Collection files = getRegionFileSystem().getStoreFiles(getColumnFamilyName()); return openStoreFiles(files, warmup); } @@ -610,7 +619,7 @@ private List openStoreFiles(Collection files, boolean if (ioe != null) { // close StoreFile readers boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true; for (HStoreFile file : results) { try { if (file != null) { @@ -638,7 +647,8 @@ private List openStoreFiles(Collection files, boolean results.removeAll(filesToRemove); if (!filesToRemove.isEmpty() && this.isPrimaryReplicaStore()) { LOG.debug("Moving the files {} to archive", filesToRemove); - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); + getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), + filesToRemove); } } @@ -647,7 +657,7 @@ private List openStoreFiles(Collection files, boolean @Override public void refreshStoreFiles() throws IOException { - Collection newFiles = fs.getStoreFiles(getColumnFamilyName()); + Collection newFiles = getRegionFileSystem().getStoreFiles(getColumnFamilyName()); refreshStoreFilesInternal(newFiles); } @@ -658,7 +668,7 @@ public void refreshStoreFiles() throws IOException { public void refreshStoreFiles(Collection newFiles) throws IOException { List storeFiles = new ArrayList<>(newFiles.size()); for (String file : newFiles) { - storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file)); + storeFiles.add(getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file)); } refreshStoreFilesInternal(storeFiles); } @@ -735,7 +745,8 @@ protected HStoreFile createStoreFileAndReader(final Path p) throws IOException { private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException { info.setRegionCoprocessorHost(this.region.getCoprocessorHost()); - HStoreFile storeFile = new HStoreFile(info, this.family.getBloomFilterType(), this.cacheConf); + HStoreFile storeFile = new HStoreFile(info, getColumnFamilyDescriptor().getBloomFilterType(), + getCacheConfig()); storeFile.initReader(); return storeFile; } @@ -818,7 +829,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { LOG.info("Validating hfile at " + srcPath + " for inclusion in " + this); FileSystem srcFs = srcPath.getFileSystem(conf); srcFs.access(srcPath, FsAction.READ_WRITE); - reader = HFile.createReader(srcFs, srcPath, cacheConf, isPrimaryReplicaStore(), conf); + reader = HFile.createReader(srcFs, srcPath, getCacheConfig(), isPrimaryReplicaStore(), conf); Optional firstKey = reader.getFirstRowKey(); 
Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); @@ -855,7 +866,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { do { Cell cell = scanner.getCell(); if (prevCell != null) { - if (comparator.compareRows(prevCell, cell) > 0) { + if (getComparator().compareRows(prevCell, cell) > 0) { throw new InvalidHFileException("Previous row is greater than" + " current row: path=" + srcPath + " previous=" + CellUtil.getCellKeyAsString(prevCell) + " current=" @@ -892,13 +903,13 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { */ public Pair preBulkLoadHFile(String srcPathStr, long seqNum) throws IOException { Path srcPath = new Path(srcPathStr); - return fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); + return getRegionFileSystem().bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); } public Path bulkLoadHFile(byte[] family, String srcPathStr, Path dstPath) throws IOException { Path srcPath = new Path(srcPathStr); try { - fs.commitStoreFile(srcPath, dstPath); + getRegionFileSystem().commitStoreFile(srcPath, dstPath); } finally { if (this.getCoprocessorHost() != null) { this.getCoprocessorHost().postCommitStoreFile(family, srcPath, dstPath); @@ -964,8 +975,8 @@ public ImmutableCollection close() throws IOException { storeEngine.getStoreFileManager().clearCompactedFiles(); // clear the compacted files if (CollectionUtils.isNotEmpty(compactedfiles)) { - removeCompactedfiles(compactedfiles, cacheConf != null ? - cacheConf.shouldEvictOnClose() : true); + removeCompactedfiles(compactedfiles, getCacheConfig() != null ? + getCacheConfig().shouldEvictOnClose() : true); } if (!result.isEmpty()) { // initialize the thread pool for closing store files in parallel. @@ -981,7 +992,7 @@ public ImmutableCollection close() throws IOException { @Override public Void call() throws IOException { boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? 
getCacheConfig().shouldEvictOnClose(): true; f.closeStoreFile(evictOnClose); return null; } @@ -1092,7 +1103,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { FileSystem srcFs = path.getFileSystem(conf); srcFs.access(path, FsAction.READ_WRITE); try (HFile.Reader reader = - HFile.createReader(srcFs, path, cacheConf, isPrimaryReplicaStore(), conf)) { + HFile.createReader(srcFs, path, getCacheConfig(), isPrimaryReplicaStore(), conf)) { Optional firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); Optional lk = reader.getLastKey(); @@ -1104,7 +1115,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { } } - Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path); + Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path); HStoreFile sf = createStoreFileAndReader(dstPath); StoreFileReader r = sf.getReader(); this.storeSize.addAndGet(r.length()); @@ -1129,7 +1140,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException { private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status) throws IOException { // Write-out finished successfully, move into the right spot - Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path); + Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path); status.setStatus("Flushing " + this + ": reopening flushed file"); HStoreFile sf = createStoreFileAndReader(dstPath); @@ -1167,12 +1178,13 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm boolean shouldDropBehind, long totalCompactedFilesSize, String fileStoragePolicy) throws IOException { // creating new cache config for each new writer + final CacheConfig cacheConf = getCacheConfig(); final CacheConfig writerCacheConf = new CacheConfig(cacheConf); if (isCompaction) { // Don't cache data on write on compactions, unless specifically configured to do so // Cache only when total file size remains lower than configured threshold final boolean cacheCompactedBlocksOnWrite = - cacheConf.shouldCacheCompactedBlocksOnWrite(); + getCacheConfig().shouldCacheCompactedBlocksOnWrite(); // if data blocks are to be cached on write // during compaction, we should forcefully // cache index and bloom blocks as well @@ -1212,12 +1224,12 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm region.getRegionInfo().getEncodedName()); } HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag, - cryptoContext); - Path familyTempDir = new Path(fs.getTempDir(), family.getNameAsString()); + getEncryptionContext()); + Path familyTempDir = new Path(getRegionFileSystem().getTempDir(), getColumnFamilyName()); StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, this.getFileSystem()) .withOutputDir(familyTempDir) - .withBloomType(family.getBloomFilterType()) + .withBloomType(storeContext.getBloomFilterType()) .withMaxKeyCount(maxKeyCount) .withFavoredNodes(favoredNodes) .withFileContext(hFileContext) @@ -1236,18 +1248,19 @@ private HFileContext createFileContext(Compression.Algorithm compression, .withIncludesMvcc(includeMVCCReadpoint) .withIncludesTags(includesTag) .withCompression(compression) - .withCompressTags(family.isCompressTags()) - .withChecksumType(checksumType) - .withBytesPerCheckSum(bytesPerChecksum) + .withCompressTags(getColumnFamilyDescriptor().isCompressTags()) + 
.withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) .withBlockSize(blocksize) .withHBaseCheckSum(true) - .withDataBlockEncoding(family.getDataBlockEncoding()) + .withDataBlockEncoding(getColumnFamilyDescriptor() + .getDataBlockEncoding()) .withEncryptionContext(cryptoContext) .withCreateTime(EnvironmentEdgeManager.currentTime()) - .withColumnFamily(family.getName()) + .withColumnFamily(getColumnFamilyDescriptor().getName()) .withTableName(region.getTableDescriptor() .getTableName().getName()) - .withCellComparator(this.comparator) + .withCellComparator(getComparator()) .build(); return hFileContext; } @@ -1529,7 +1542,7 @@ public List compact(CompactionContext compaction, // Ready to go. Have list of files to compact. LOG.info("Starting compaction of " + filesToCompact + - " into tmpdir=" + fs.getTempDir() + ", totalSize=" + + " into tmpdir=" + getRegionFileSystem().getTempDir() + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1)); return doCompaction(cr, filesToCompact, user, compactionStartTime, @@ -1579,7 +1592,7 @@ private void setStoragePolicyFromFileName(List newFiles) throws IOExceptio String prefix = HConstants.STORAGE_POLICY_PREFIX; for (Path newFile : newFiles) { if (newFile.getParent().getName().startsWith(prefix)) { - CommonFSUtils.setStoragePolicy(fs.getFileSystem(), newFile, + CommonFSUtils.setStoragePolicy(getRegionFileSystem().getFileSystem(), newFile, newFile.getParent().getName().substring(prefix.length())); } } @@ -1604,7 +1617,7 @@ private List moveCompactedFilesIntoPlace(CompactionRequestImpl cr, HStoreFile moveFileIntoPlace(Path newFile) throws IOException { validateStoreFile(newFile); // Move the file into the right spot - Path destPath = fs.commitStoreFile(getColumnFamilyName(), newFile); + Path destPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), newFile); return createStoreFileAndReader(destPath); } @@ -1624,8 +1637,8 @@ private void writeCompactionWalRecord(Collection filesCompacted, newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList()); RegionInfo info = this.region.getRegionInfo(); CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info, - family.getName(), inputPaths, outputPaths, - fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString())); + getColumnFamilyDescriptor().getName(), inputPaths, outputPaths, + getRegionFileSystem().getStoreDir(getColumnFamilyDescriptor().getNameAsString())); // Fix reaching into Region to get the maxWaitForSeqId. // Does this method belong in Region altogether given it is making so many references up there? 
// Could be Region#writeCompactionMarker(compactionDescriptor); @@ -1752,7 +1765,7 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick String familyName = this.getColumnFamilyName(); Set inputFiles = new HashSet<>(); for (String compactionInput : compactionInputs) { - Path inputPath = fs.getStoreFilePath(familyName, compactionInput); + Path inputPath = getRegionFileSystem().getStoreFilePath(familyName, compactionInput); inputFiles.add(inputPath.getName()); } @@ -1772,7 +1785,8 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick compactionOutputs.remove(sf.getPath().getName()); } for (String compactionOutput : compactionOutputs) { - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), compactionOutput); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); outputStoreFiles.add(storeFile); } @@ -2092,7 +2106,7 @@ int versionsToReturn(final int wantedVersions) { throw new IllegalArgumentException("Number of versions must be > 0"); } // Make sure we do not return more than maximum versions for this store. - int maxVersions = this.family.getMaxVersions(); + int maxVersions = getColumnFamilyDescriptor().getMaxVersions(); return wantedVersions > maxVersions ? maxVersions: wantedVersions; } @@ -2367,7 +2381,7 @@ public RegionCoprocessorHost getCoprocessorHost() { @Override public RegionInfo getRegionInfo() { - return this.fs.getRegionInfo(); + return getRegionFileSystem().getRegionInfo(); } @Override @@ -2509,7 +2523,7 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) List storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); + StoreFileInfo storeFileInfo = getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); HStore.this.storeSize.addAndGet(storeFile.getReader().length()); @@ -2559,7 +2573,7 @@ public boolean needsCompaction() { * @return cache configuration for this Store. 
*/ public CacheConfig getCacheConfig() { - return this.cacheConf; + return storeContext.getCacheConf(); } public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); @@ -2573,12 +2587,12 @@ public CacheConfig getCacheConfig() { @Override public long heapSize() { MemStoreSize memstoreSize = this.memstore.size(); - return DEEP_OVERHEAD + memstoreSize.getHeapSize(); + return DEEP_OVERHEAD + memstoreSize.getHeapSize() + storeContext.heapSize(); } @Override public CellComparator getComparator() { - return comparator; + return storeContext.getComparator(); } public ScanInfo getScanInfo() { @@ -2652,7 +2666,7 @@ protected OffPeakHours getOffPeakHours() { public void onConfigurationChange(Configuration conf) { this.conf = new CompoundConfiguration() .add(conf) - .addBytesMap(family.getValues()); + .addBytesMap(getColumnFamilyDescriptor().getValues()); this.storeEngine.compactionPolicy.setConf(conf); this.offPeakHours = OffPeakHours.getInstance(conf); } @@ -2784,8 +2798,8 @@ private void removeCompactedfiles(Collection compactedfiles, boolean LOG.debug("Moving the files {} to archive", filesToRemove); // Only if this is successful it has to be removed try { - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), - filesToRemove); + getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), + filesToRemove); } catch (FailedArchiveException fae) { // Even if archiving some files failed, we still need to clear out any of the // files which were successfully archived. Otherwise we will receive a diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java new file mode 100644 index 000000000000..18ae7a6290b7 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; + +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.function.Supplier; + +/** + * This carries the information on some of the meta data about the HStore. This + * meta data can be used across the HFileWriter/Readers and other HStore consumers without the + * need of passing around the complete store. 
*/ +@InterfaceAudience.Private +public class HStoreContext implements HeapSize { + public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStoreContext.class, false); + + private final HFileContext defaultFileContext; + private final CacheConfig cacheConf; + private final HRegionFileSystem regionFileSystem; + private final CellComparator comparator; + private final BloomType bloomFilterType; + private final Supplier<Collection<HStoreFile>> compactedFilesSupplier; + private final Supplier<InetSocketAddress[]> favoredNodesSupplier; + private final ColumnFamilyDescriptor family; + private final Path familyStoreDirectoryPath; + private final RegionCoprocessorHost coprocessorHost; + + private HStoreContext(Builder builder) { + this.defaultFileContext = builder.defaultFileContext; + this.cacheConf = builder.cacheConf; + this.regionFileSystem = builder.regionFileSystem; + this.comparator = builder.comparator; + this.bloomFilterType = builder.bloomFilterType; + this.compactedFilesSupplier = builder.compactedFilesSupplier; + this.favoredNodesSupplier = builder.favoredNodesSupplier; + this.family = builder.family; + this.familyStoreDirectoryPath = builder.familyStoreDirectoryPath; + this.coprocessorHost = builder.coprocessorHost; + } + + public HFileContext getDefaultFileContext() { + return defaultFileContext; + } + + public CacheConfig getCacheConf() { + return cacheConf; + } + + public HRegionFileSystem getRegionFileSystem() { + return regionFileSystem; + } + + public CellComparator getComparator() { + return comparator; + } + + public BloomType getBloomFilterType() { + return bloomFilterType; + } + + public Supplier<Collection<HStoreFile>> getCompactedFilesSupplier() { + return compactedFilesSupplier; + } + + public Supplier<InetSocketAddress[]> getFavoredNodesSupplier() { + return favoredNodesSupplier; + } + + public ColumnFamilyDescriptor getFamily() { + return family; + } + + public Path getFamilyStoreDirectoryPath() { + return familyStoreDirectoryPath; + } + + public RegionCoprocessorHost getCoprocessorHost() { + return coprocessorHost; + } + + public static Builder getBuilder() { + return new Builder(); + } + + @Override + public long heapSize() { + return FIXED_OVERHEAD + defaultFileContext.heapSize(); + } + + public static class Builder { + private HFileContext defaultFileContext; + private CacheConfig cacheConf; + private HRegionFileSystem regionFileSystem; + private CellComparator comparator; + private BloomType bloomFilterType; + private Supplier<Collection<HStoreFile>> compactedFilesSupplier; + private Supplier<InetSocketAddress[]> favoredNodesSupplier; + private ColumnFamilyDescriptor family; + private Path familyStoreDirectoryPath; + private RegionCoprocessorHost coprocessorHost; + + public Builder withDefaultHFileContext(HFileContext defaultFileContext) { + this.defaultFileContext = defaultFileContext; + return this; + } + + public Builder withCacheConfig(CacheConfig cacheConf) { + this.cacheConf = cacheConf; + return this; + } + + public Builder withRegionFileSystem(HRegionFileSystem regionFileSystem) { + this.regionFileSystem = regionFileSystem; + return this; + } + + public Builder withCellComparator(CellComparator comparator) { + this.comparator = comparator; + return this; + } + + public Builder withBloomType(BloomType bloomFilterType) { + this.bloomFilterType = bloomFilterType; + return this; + } + + public Builder withCompactedFilesSupplier(Supplier<Collection<HStoreFile>> + compactedFilesSupplier) { + this.compactedFilesSupplier = compactedFilesSupplier; + return this; + } + + public Builder withFavoredNodesSupplier(Supplier<InetSocketAddress[]> favoredNodesSupplier) { + this.favoredNodesSupplier = favoredNodesSupplier; + return this; + } +
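+ // The descriptor carries the family's file-level tunings (block size, compression,
+ // data block encoding, TTL) that file writers and readers need, without exposing
+ // the rest of the store.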
+ public Builder withColumnFamilyDescriptor(ColumnFamilyDescriptor family) { + this.family = family; + return this; + } + + public Builder withFamilyStoreDirectoryPath(Path familyStoreDirectoryPath) { + this.familyStoreDirectoryPath = familyStoreDirectoryPath; + return this; + } + + public Builder withRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) { + this.coprocessorHost = coprocessorHost; + return this; + } + + public HStoreContext build () { + return new HStoreContext(this); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 0e4f6c2bb8a4..6d383e86eb3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -24,9 +24,13 @@ import java.util.OptionalInt; import java.util.OptionalLong; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -136,4 +140,26 @@ static Optional getSplitPoint(Collection storefiles, return largestFile.isPresent() ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) : Optional.empty(); } + + /** + * Returns the configured checksum algorithm. + * @param conf The configuration + * @return The checksum algorithm that is set in the configuration + */ + public static ChecksumType getChecksumType(Configuration conf) { + String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME); + return checksumName == null ? ChecksumType.getDefaultChecksumType() : + ChecksumType.nameToType(checksumName); + } + + /** + * Returns the configured bytesPerChecksum value. 
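+ * Falls back to HFile.DEFAULT_BYTES_PER_CHECKSUM when HConstants.BYTES_PER_CHECKSUM is
+ * not set in the configuration.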
+ * @param conf The configuration + * @return The bytesPerChecksum that is set in the configuration + */ + public static int getBytesPerChecksum(Configuration conf) { + return conf.getInt(HConstants.BYTES_PER_CHECKSUM, + HFile.DEFAULT_BYTES_PER_CHECKSUM); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index ec9a59c7bf5a..342565d069a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.FsDelegationToken; import org.apache.hadoop.hbase.util.Bytes; @@ -749,8 +750,8 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Algorithm compression = familyDescriptor.getCompressionType(); BloomType bloomFilterType = familyDescriptor.getBloomFilterType(); HFileContext hFileContext = new HFileContextBuilder().withCompression(compression) - .withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blocksize) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize) .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) .build(); halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index 9623bd1c7220..50bc5fe62fb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.CellSet; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.EntryBuffers.RegionEntryBuffer; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -199,8 +199,8 @@ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String r new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) .withOutputDir(outputDir); HFileContext hFileContext = new HFileContextBuilder(). - withChecksumType(HStore.getChecksumType(walSplitter.conf)). - withBytesPerCheckSum(HStore.getBytesPerChecksum(walSplitter.conf)). + withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)). + withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)). withCellComparator(isMetaTable? 
MetaCellComparator.META_COMPARATOR: CellComparatorImpl.COMPARATOR).build(); return writerBuilder.withFileContext(hFileContext).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 3d713052559e..9e72ea51e0a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.DefaultMemStore; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.HStoreContext; import org.apache.hadoop.hbase.regionserver.ImmutableSegment; import org.apache.hadoop.hbase.regionserver.MemStoreCompactor; import org.apache.hadoop.hbase.regionserver.MutableSegment; @@ -606,7 +607,7 @@ public void testObjectSize() throws IOException { @Test public void testAutoCalcFixedOverHead() { Class[] classList = new Class[] { HFileContext.class, HRegion.class, BlockCacheKey.class, - HFileBlock.class, HStore.class, LruBlockCache.class }; + HFileBlock.class, HStore.class, LruBlockCache.class, HStoreContext.class }; for (Class cl : classList) { // do estimate in advance to ensure class is loaded ClassSize.estimateBase(cl, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index fcbc718296ae..bd1715b1ef52 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -7657,7 +7657,7 @@ protected List doCompaction(CompactionRequestImpl cr, LOG.warn("hbase.hstore.compaction.complete is set to false"); List sfs = new ArrayList<>(newFiles.size()); final boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true; for (Path newFile : newFiles) { // Create storefile around what we wrote with a reader on it. 
HStoreFile sf = createStoreFileAndReader(newFile); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java index 12cf57671f9c..88f201efff6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java @@ -221,8 +221,8 @@ private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception { .withIncludesTags(true) .withCompression(compression) .withCompressTags(family.isCompressTags()) - .withChecksumType(HStore.getChecksumType(conf)) - .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) .withBlockSize(family.getBlocksize()) .withHBaseCheckSum(true) .withDataBlockEncoding(family.getDataBlockEncoding()) From 2731227aaedbcb904b6a8f6036459ea53a296aa2 Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Tue, 22 Dec 2020 15:12:10 -0800 Subject: [PATCH 02/10] fix checkstyle --- .../main/java/org/apache/hadoop/hbase/mob/MobUtils.java | 1 - .../org/apache/hadoop/hbase/regionserver/HMobStore.java | 4 ++-- .../java/org/apache/hadoop/hbase/regionserver/HStore.java | 7 ++++--- .../apache/hadoop/hbase/regionserver/HStoreContext.java | 7 +++---- .../org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java | 1 - .../test/java/org/apache/hadoop/hbase/io/TestHeapSize.java | 2 +- 6 files changed, 10 insertions(+), 12 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index 4ef1b9019bb2..2ae29385eb42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreUtils; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 6d1d2bbf94b2..bd87ea9eb072 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -226,8 +226,8 @@ public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath, boolean isCompaction) throws IOException { return MobUtils.createWriter(conf, getFileSystem(), getColumnFamilyDescriptor(), new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, getCacheConfig(), - getEncryptionContext(), StoreUtils.getChecksumType(conf), StoreUtils.getBytesPerChecksum(conf), - blocksize, BloomType.NONE, isCompaction); + getEncryptionContext(), StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), blocksize, BloomType.NONE, isCompaction); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index b332ccee6e0f..c66e3aec4546 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -2523,7 +2523,8 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) List storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) - StoreFileInfo storeFileInfo = getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); HStore.this.storeSize.addAndGet(storeFile.getReader().length()); @@ -2798,8 +2799,8 @@ private void removeCompactedfiles(Collection compactedfiles, boolean LOG.debug("Moving the files {} to archive", filesToRemove); // Only if this is successful it has to be removed try { - getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), - filesToRemove); + getRegionFileSystem() + .removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); } catch (FailedArchiveException fae) { // Even if archiving some files failed, we still need to clear out any of the // files which were successfully archived. Otherwise we will receive a diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java index 18ae7a6290b7..9c538083a67b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.regionserver; +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.function.Supplier; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -26,10 +29,6 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.function.Supplier; - /** * This carries the information on some of the meta data about the HStore. 
This * meta data can be used across the HFileWriter/Readers and other HStore consumers without the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index 342565d069a0..b0b086e145a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -83,7 +83,6 @@ import org.apache.hadoop.hbase.io.hfile.ReaderContext; import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreUtils; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 9e72ea51e0a2..d130abcc893d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -607,7 +607,7 @@ public void testObjectSize() throws IOException { @Test public void testAutoCalcFixedOverHead() { Class[] classList = new Class[] { HFileContext.class, HRegion.class, BlockCacheKey.class, - HFileBlock.class, HStore.class, LruBlockCache.class, HStoreContext.class }; + HFileBlock.class, HStore.class, LruBlockCache.class, HStoreContext.class }; for (Class cl : classList) { // do estimate in advance to ensure class is loaded ClassSize.estimateBase(cl, false); From baeef9cad915d22678151920ac968bc675a3116f Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Tue, 22 Dec 2020 18:48:59 -0800 Subject: [PATCH 03/10] additional fix checkstyle for HStoreContext --- .../org/apache/hadoop/hbase/regionserver/HStoreContext.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java index 9c538083a67b..26f6eaa4c7a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java @@ -174,7 +174,7 @@ public Builder withRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) return this; } - public HStoreContext build () { + public HStoreContext build() { return new HStoreContext(this); } } From 7725f5f131525c73a27cd4e5e26eedd8ee8920bd Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Wed, 23 Dec 2020 11:53:05 -0800 Subject: [PATCH 04/10] Address comments from stack - rename HStoreContext to StoreContext - rewrite the getChecksumType - fix one more style issues --- .../java/org/apache/hadoop/hbase/regionserver/HStore.java | 6 +++--- .../{HStoreContext.java => StoreContext.java} | 8 ++++---- .../org/apache/hadoop/hbase/regionserver/StoreUtils.java | 5 ++--- .../java/org/apache/hadoop/hbase/io/TestHeapSize.java | 4 ++-- 4 files changed, 11 insertions(+), 12 deletions(-) rename hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/{HStoreContext.java => StoreContext.java} (97%) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 
c66e3aec4546..c617ae46d6d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -234,7 +234,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private AtomicLong compactedCellsSize = new AtomicLong(); private AtomicLong majorCompactedCellsSize = new AtomicLong(); - private HStoreContext storeContext; + private StoreContext storeContext; /** * Constructor @@ -331,8 +331,8 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, cacheOnWriteLogged = false; } - private HStoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException { - return new HStoreContext.Builder() + private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException { + return new StoreContext.Builder() .withBloomType(family.getBloomFilterType()) .withCacheConfig(createCacheConf(family)) .withCellComparator(region.getCellComparator()) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java similarity index 97% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java index 26f6eaa4c7a8..53ce790d7f6d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java @@ -35,7 +35,7 @@ * need of passing around the complete store. */ @InterfaceAudience.Private -public class HStoreContext implements HeapSize { +public final class StoreContext implements HeapSize { public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); private final HFileContext defaultFileContext; @@ -49,7 +49,7 @@ public class HStoreContext implements HeapSize { private final Path familyStoreDirectoryPath; private final RegionCoprocessorHost coprocessorHost; - private HStoreContext(Builder builder) { + private StoreContext(Builder builder) { this.defaultFileContext = builder.defaultFileContext; this.cacheConf = builder.cacheConf; this.regionFileSystem = builder.regionFileSystem; @@ -174,8 +174,8 @@ public Builder withRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) return this; } - public HStoreContext build() { - return new HStoreContext(this); + public StoreContext build() { + return new StoreContext(this); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 6d383e86eb3a..ac5955feca7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -147,9 +147,8 @@ static Optional getSplitPoint(Collection storefiles, * @return The checksum algorithm that is set in the configuration */ public static ChecksumType getChecksumType(Configuration conf) { - String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME); - return checksumName == null ? 
ChecksumType.getDefaultChecksumType() : - ChecksumType.nameToType(checksumName); + return ChecksumType.nameToType( + conf.get(HConstants.CHECKSUM_TYPE_NAME, ChecksumType.getDefaultChecksumType().getName())); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index d130abcc893d..366d75218a8c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -58,7 +58,7 @@ import org.apache.hadoop.hbase.regionserver.DefaultMemStore; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.HStoreContext; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.ImmutableSegment; import org.apache.hadoop.hbase.regionserver.MemStoreCompactor; import org.apache.hadoop.hbase.regionserver.MutableSegment; @@ -607,7 +607,7 @@ public void testObjectSize() throws IOException { @Test public void testAutoCalcFixedOverHead() { Class[] classList = new Class[] { HFileContext.class, HRegion.class, BlockCacheKey.class, - HFileBlock.class, HStore.class, LruBlockCache.class, HStoreContext.class }; + HFileBlock.class, HStore.class, LruBlockCache.class, StoreContext.class }; for (Class cl : classList) { // do estimate in advance to ensure class is loaded ClassSize.estimateBase(cl, false); From 8a9af9fa8f2efcd7bfda6cff4dd4635f1227d810 Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Wed, 23 Dec 2020 14:16:47 -0800 Subject: [PATCH 05/10] make storecontext final in HStore and fix import --- .../java/org/apache/hadoop/hbase/regionserver/HStore.java | 6 +++--- .../test/java/org/apache/hadoop/hbase/io/TestHeapSize.java | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index c617ae46d6d5..4fc79a38c15c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -234,7 +234,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private AtomicLong compactedCellsSize = new AtomicLong(); private AtomicLong majorCompactedCellsSize = new AtomicLong(); - private StoreContext storeContext; + private final StoreContext storeContext; /** * Constructor @@ -1254,12 +1254,12 @@ private HFileContext createFileContext(Compression.Algorithm compression, .withBlockSize(blocksize) .withHBaseCheckSum(true) .withDataBlockEncoding(getColumnFamilyDescriptor() - .getDataBlockEncoding()) + .getDataBlockEncoding()) .withEncryptionContext(cryptoContext) .withCreateTime(EnvironmentEdgeManager.currentTime()) .withColumnFamily(getColumnFamilyDescriptor().getName()) .withTableName(region.getTableDescriptor() - .getTableName().getName()) + .getTableName().getName()) .withCellComparator(getComparator()) .build(); return hFileContext; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 366d75218a8c..3f326a30cfdf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -58,11 +58,11 @@ 
import org.apache.hadoop.hbase.regionserver.DefaultMemStore; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.ImmutableSegment; import org.apache.hadoop.hbase.regionserver.MemStoreCompactor; import org.apache.hadoop.hbase.regionserver.MutableSegment; import org.apache.hadoop.hbase.regionserver.Segment; +import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker.NonSyncTimeRangeTracker; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker.SyncTimeRangeTracker; import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector; From b3c9561e16621296589f36554419e28edb67cdf9 Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Wed, 23 Dec 2020 15:57:21 -0800 Subject: [PATCH 06/10] use storecontext with writer generation --- .../hadoop/hbase/io/hfile/HFileContext.java | 4 ++ .../hadoop/hbase/regionserver/HStore.java | 56 ++++++------------- 2 files changed, 22 insertions(+), 38 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java index cfadb6cfd337..2e8da6481aee 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java @@ -138,6 +138,10 @@ public Compression.Algorithm getCompression() { return compressAlgo; } + public void setCompression(Compression.Algorithm compressAlgo) { + this.compressAlgo = compressAlgo; + } + public boolean isUseHBaseChecksum() { return usesHBaseChecksum; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 4fc79a38c15c..2df7a1b36511 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1218,54 +1218,34 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm } } } - InetSocketAddress[] favoredNodes = null; - if (region.getRegionServerServices() != null) { - favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion( - region.getRegionInfo().getEncodedName()); - } - HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag, - getEncryptionContext()); + HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag); Path familyTempDir = new Path(getRegionFileSystem().getTempDir(), getColumnFamilyName()); - StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, - this.getFileSystem()) - .withOutputDir(familyTempDir) - .withBloomType(storeContext.getBloomFilterType()) - .withMaxKeyCount(maxKeyCount) - .withFavoredNodes(favoredNodes) - .withFileContext(hFileContext) - .withShouldDropCacheBehind(shouldDropBehind) - .withCompactedFilesSupplier(this::getCompactedFiles) - .withFileStoragePolicy(fileStoragePolicy); + StoreFileWriter.Builder builder = + new StoreFileWriter.Builder(conf, writerCacheConf, getFileSystem()) + .withOutputDir(familyTempDir) + .withBloomType(storeContext.getBloomFilterType()) + .withMaxKeyCount(maxKeyCount) + .withFavoredNodes(storeContext.getFavoredNodesSupplier().get()) + .withFileContext(hFileContext) + 
.withShouldDropCacheBehind(shouldDropBehind) + .withCompactedFilesSupplier(storeContext.getCompactedFilesSupplier()) + .withFileStoragePolicy(fileStoragePolicy); return builder.build(); } private HFileContext createFileContext(Compression.Algorithm compression, - boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) { + boolean includeMVCCReadpoint, boolean includesTag) { if (compression == null) { compression = HFile.DEFAULT_COMPRESSION_ALGORITHM; } - HFileContext hFileContext = new HFileContextBuilder() - .withIncludesMvcc(includeMVCCReadpoint) - .withIncludesTags(includesTag) - .withCompression(compression) - .withCompressTags(getColumnFamilyDescriptor().isCompressTags()) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(blocksize) - .withHBaseCheckSum(true) - .withDataBlockEncoding(getColumnFamilyDescriptor() - .getDataBlockEncoding()) - .withEncryptionContext(cryptoContext) - .withCreateTime(EnvironmentEdgeManager.currentTime()) - .withColumnFamily(getColumnFamilyDescriptor().getName()) - .withTableName(region.getTableDescriptor() - .getTableName().getName()) - .withCellComparator(getComparator()) - .build(); - return hFileContext; + HFileContext fileContext = storeContext.getDefaultFileContext(); fileContext.setIncludesMvcc(includeMVCCReadpoint); fileContext.setIncludesTags(includesTag); fileContext.setCompression(compression); fileContext.setFileCreateTime(EnvironmentEdgeManager.currentTime()); return fileContext; } - private long getTotalSize(Collection sfs) { return sfs.stream().mapToLong(sf -> sf.getReader().length()).sum(); } From 5e87534477d0f130a10476ae6b1ae2128c21d28c Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Tue, 29 Dec 2020 16:18:59 -0800 Subject: [PATCH 07/10] hide the supplier behind the getter for `FavoredNodes` --- .../java/org/apache/hadoop/hbase/regionserver/HStore.java | 2 +- .../org/apache/hadoop/hbase/regionserver/StoreContext.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-)
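The change is a small encapsulation pattern worth spelling out before the hunks: return the resolved value, not the Supplier. A minimal sketch, assuming the field and generic types match StoreContext's declarations (the surrounding class is omitted):

// Field as declared in StoreContext; the element type is assumed to be
// InetSocketAddress[], matching the getter introduced below.
private Supplier<InetSocketAddress[]> favoredNodesSupplier;

// Before: callers receive the Supplier itself, which leaks the lazy-lookup mechanism.
public Supplier<InetSocketAddress[]> getFavoredNodesSupplier() {
  return favoredNodesSupplier;
}

// After: callers see only the data; the supplier stays an implementation detail.
public InetSocketAddress[] getFavoredNodes() {
  return favoredNodesSupplier.get();
}

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 2df7a1b36511..8a8191d09fb4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1225,7 +1225,7 @@ .withOutputDir(familyTempDir) .withBloomType(storeContext.getBloomFilterType()) .withMaxKeyCount(maxKeyCount) - .withFavoredNodes(storeContext.getFavoredNodesSupplier().get()) + .withFavoredNodes(storeContext.getFavoredNodes()) .withFileContext(hFileContext) .withShouldDropCacheBehind(shouldDropBehind) .withCompactedFilesSupplier(storeContext.getCompactedFilesSupplier()) .withFileStoragePolicy(fileStoragePolicy); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java index 53ce790d7f6d..1a66076f1cd9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java @@ -86,8 +86,8 @@ public Supplier<Collection<HStoreFile>> getCompactedFilesSupplier() { return compactedFilesSupplier; } - public Supplier<InetSocketAddress[]> getFavoredNodesSupplier() { - return favoredNodesSupplier; + public InetSocketAddress[] getFavoredNodes() { + return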
favoredNodesSupplier.get(); } public ColumnFamilyDescriptor getFamily() { From b2df4758903235ba4feb7508d0986debb35e32a9 Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Tue, 5 Jan 2021 17:53:20 -0800 Subject: [PATCH 08/10] address comments and remove the defaultFileContext from StoreContext --- .../hadoop/hbase/regionserver/HMobStore.java | 4 +- .../hadoop/hbase/regionserver/HStore.java | 92 +++++++++---------- .../hadoop/hbase/regionserver/Store.java | 2 + .../hbase/regionserver/StoreContext.java | 34 ++++--- 4 files changed, 73 insertions(+), 59 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index bd87ea9eb072..ce8e1902a7d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -226,8 +226,8 @@ public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath, boolean isCompaction) throws IOException { return MobUtils.createWriter(conf, getFileSystem(), getColumnFamilyDescriptor(), new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, getCacheConfig(), - getEncryptionContext(), StoreUtils.getChecksumType(conf), - StoreUtils.getBytesPerChecksum(conf), blocksize, BloomType.NONE, isCompaction); + getStoreContext().getEncryptionContext(), StoreUtils.getChecksumType(conf), + StoreUtils.getBytesPerChecksum(conf), getBlockSize(), BloomType.NONE, isCompaction); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 8a8191d09fb4..4d740db1f679 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -211,7 +211,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, private final Set changedReaderObservers = Collections.newSetFromMap(new ConcurrentHashMap()); - protected final int blocksize; private HFileDataBlockEncoder dataBlockEncoder; final StoreEngine storeEngine; @@ -257,12 +256,10 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.storeContext = initializeStoreContext(family); // Assemble the store's home directory and Ensure it exists. 
- getRegionFileSystem().createStoreDir(getColumnFamilyName()); - - this.blocksize = getColumnFamilyDescriptor().getBlocksize(); + getRegionFileSystem().createStoreDir(family.getNameAsString()); // set block storage policy for store directory - String policyName = getColumnFamilyDescriptor().getStoragePolicy(); + String policyName = family.getStoragePolicy(); if (null == policyName) { policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY); } @@ -333,18 +330,19 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException { return new StoreContext.Builder() - .withBloomType(family.getBloomFilterType()) - .withCacheConfig(createCacheConf(family)) - .withCellComparator(region.getCellComparator()) - .withColumnFamilyDescriptor(family) - .withCompactedFilesSupplier(this::getCompactedFiles) - .withRegionFileSystem(region.getRegionFileSystem()) - .withDefaultHFileContext(getDefaultHFileContext(family)) - .withFavoredNodesSupplier(this::getFavoredNodes) - .withFamilyStoreDirectoryPath(region.getRegionFileSystem() - .getStoreDir(family.getNameAsString())) - .withRegionCoprocessorHost(region.getCoprocessorHost()) - .build(); + .withBlockSize(family.getBlocksize()) + .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family)) + .withBloomType(family.getBloomFilterType()) + .withCacheConfig(createCacheConf(family)) + .withCellComparator(region.getCellComparator()) + .withColumnFamilyDescriptor(family) + .withCompactedFilesSupplier(this::getCompactedFiles) + .withRegionFileSystem(region.getRegionFileSystem()) + .withFavoredNodesSupplier(this::getFavoredNodes) + .withFamilyStoreDirectoryPath(region.getRegionFileSystem() + .getStoreDir(family.getNameAsString())) + .withRegionCoprocessorHost(region.getCoprocessorHost()) + .build(); } private InetSocketAddress[] getFavoredNodes() { @@ -356,23 +354,6 @@ private InetSocketAddress[] getFavoredNodes() { return favoredNodes; } - private HFileContext getDefaultHFileContext(ColumnFamilyDescriptor family) throws IOException { - HFileContext hFileContext = new HFileContextBuilder() - .withCompression(HFile.DEFAULT_COMPRESSION_ALGORITHM) - .withCompressTags(family.isCompressTags()) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(family.getBlocksize()) - .withHBaseCheckSum(true) - .withDataBlockEncoding(family.getDataBlockEncoding()) - .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family)) - .withColumnFamily(family.getName()) - .withTableName(region.getTableDescriptor().getTableName().getName()) - .withCellComparator(region.getCellComparator()) - .build(); - return hFileContext; - } - /** * @return MemStore Instance to use in this store. 
*/ @@ -449,6 +430,15 @@ public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { return ttl; } + StoreContext getStoreContext() { + return storeContext; + } + + @Override + public int getBlockSize() { + return this.storeContext.getBlockSize(); + } + @Override public String getColumnFamilyName() { return this.storeContext.getFamily().getNameAsString(); @@ -508,10 +498,6 @@ public ColumnFamilyDescriptor getColumnFamilyDescriptor() { return this.storeContext.getFamily(); } - public Encryption.Context getEncryptionContext() { - return storeContext.getDefaultFileContext().getEncryptionContext(); - } - @Override public OptionalLong getMaxSequenceId() { return StoreUtils.getMaxSequenceIdInList(this.getStorefiles()); @@ -1218,7 +1204,9 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm } } } - HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag); + Encryption.Context encryptionContext = storeContext.getEncryptionContext(); + HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag, + encryptionContext); Path familyTempDir = new Path(getRegionFileSystem().getTempDir(), getColumnFamilyName()); StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, getFileSystem()) @@ -1234,16 +1222,28 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm } private HFileContext createFileContext(Compression.Algorithm compression, - boolean includeMVCCReadpoint, boolean includesTag) { + boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) { if (compression == null) { compression = HFile.DEFAULT_COMPRESSION_ALGORITHM; } - HFileContext fileContext = storeContext.getDefaultFileContext(); - fileContext.setIncludesMvcc(includeMVCCReadpoint); - fileContext.setIncludesTags(includesTag); - fileContext.setCompression(compression); - fileContext.setFileCreateTime(EnvironmentEdgeManager.currentTime()); - return fileContext; + ColumnFamilyDescriptor family = getColumnFamilyDescriptor(); + HFileContext hFileContext = new HFileContextBuilder() + .withIncludesMvcc(includeMVCCReadpoint) + .withIncludesTags(includesTag) + .withCompression(compression) + .withCompressTags(family.isCompressTags()) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) + .withBlockSize(getBlockSize()) + .withHBaseCheckSum(true) + .withDataBlockEncoding(family.getDataBlockEncoding()) + .withEncryptionContext(encryptionContext) + .withCreateTime(EnvironmentEdgeManager.currentTime()) + .withColumnFamily(getColumnFamilyDescriptor().getName()) + .withTableName(getTableName().getName()) + .withCellComparator(getComparator()) + .build(); + return hFileContext; } private long getTotalSize(Collection sfs) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 6ec9c51930c3..5414c5114608 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -46,6 +46,8 @@ public interface Store { int PRIORITY_USER = 1; int NO_PRIORITY = Integer.MIN_VALUE; + int getBlockSize(); + // General Accessors CellComparator getComparator(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java index 1a66076f1cd9..26233505db73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java @@ -24,21 +24,22 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; /** - * This carries the information on some of the meta data about the HStore. This - * meta data can be used across the HFileWriter/Readers and other HStore consumers without the + * This carries the immutable information and references on some of the meta data about the HStore. + * This meta data can be used across the HFileWriter/Readers and other HStore consumers without the * need of passing around the complete store. */ @InterfaceAudience.Private public final class StoreContext implements HeapSize { public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); - private final HFileContext defaultFileContext; + private final int blockSize; + private final Encryption.Context encryptionContext; private final CacheConfig cacheConf; private final HRegionFileSystem regionFileSystem; private final CellComparator comparator; @@ -50,7 +51,8 @@ public final class StoreContext implements HeapSize { private final RegionCoprocessorHost coprocessorHost; private StoreContext(Builder builder) { - this.defaultFileContext = builder.defaultFileContext; + this.blockSize = builder.blockSize; + this.encryptionContext = builder.encryptionContext; this.cacheConf = builder.cacheConf; this.regionFileSystem = builder.regionFileSystem; this.comparator = builder.comparator; @@ -62,8 +64,12 @@ private StoreContext(Builder builder) { this.coprocessorHost = builder.coprocessorHost; } - public HFileContext getDefaultFileContext() { - return defaultFileContext; + public int getBlockSize() { + return blockSize; + } + + public Encryption.Context getEncryptionContext() { + return encryptionContext; } public CacheConfig getCacheConf() { @@ -108,11 +114,12 @@ public static Builder getBuilder() { @Override public long heapSize() { - return FIXED_OVERHEAD + defaultFileContext.heapSize(); + return FIXED_OVERHEAD; } public static class Builder { - private HFileContext defaultFileContext; + private int blockSize; + private Encryption.Context encryptionContext; private CacheConfig cacheConf; private HRegionFileSystem regionFileSystem; private CellComparator comparator; @@ -123,8 +130,13 @@ public static class Builder { private Path familyStoreDirectoryPath; private RegionCoprocessorHost coprocessorHost; - public Builder withDefaultHFileContext(HFileContext defaultFileContext) { - this.defaultFileContext = defaultFileContext; + public Builder withBlockSize(int blockSize) { + this.blockSize = blockSize; + return this; + } + + public Builder withEncryptionContext(Encryption.Context encryptionContext) { + this.encryptionContext = encryptionContext; return this; } From 8a0269fbe6faa6b12edfab1db13c36d923558576 Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Tue, 5 Jan 2021 21:57:45 -0800 Subject: [PATCH 09/10] removed setter for encryptionContext in HFileContext and fixed indentation in 
constructor of HStore --- .../org/apache/hadoop/hbase/io/hfile/HFileContext.java | 4 ---- .../java/org/apache/hadoop/hbase/regionserver/HStore.java | 8 ++++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java index 2e8da6481aee..cfadb6cfd337 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java @@ -138,10 +138,6 @@ public Compression.Algorithm getCompression() { return compressAlgo; } - public void setCompression(Compression.Algorithm compressAlgo) { - this.compressAlgo = compressAlgo; - } - public boolean isUseHBaseChecksum() { return usesHBaseChecksum; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 4d740db1f679..816174739287 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -247,10 +247,10 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, // CompoundConfiguration will look for keys in reverse order of addition, so we'd // add global config first, then table and cf overrides, then cf metadata. this.conf = new CompoundConfiguration() - .add(confParam) - .addBytesMap(region.getTableDescriptor().getValues()) - .addStringMap(family.getConfiguration()) - .addBytesMap(family.getValues()); + .add(confParam) + .addBytesMap(region.getTableDescriptor().getValues()) + .addStringMap(family.getConfiguration()) + .addBytesMap(family.getValues()); this.region = region; this.storeContext = initializeStoreContext(family); From 4b8ab2955ad3d9810256d7f22d7de630c5617364 Mon Sep 17 00:00:00 2001 From: Stephen Wu Date: Wed, 6 Jan 2021 10:24:05 -0800 Subject: [PATCH 10/10] remove getters in constructor of HStore and remove getBlockSize from Store interface --- .../hadoop/hbase/regionserver/HMobStore.java | 3 ++- .../hadoop/hbase/regionserver/HStore.java | 23 +++++++------------ .../hadoop/hbase/regionserver/Store.java | 2 -- 3 files changed, 10 insertions(+), 18 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index ce8e1902a7d5..7ce7f0310c7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -227,7 +227,8 @@ public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath, return MobUtils.createWriter(conf, getFileSystem(), getColumnFamilyDescriptor(), new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, getCacheConfig(), getStoreContext().getEncryptionContext(), StoreUtils.getChecksumType(conf), - StoreUtils.getBytesPerChecksum(conf), getBlockSize(), BloomType.NONE, isCompaction); + StoreUtils.getBytesPerChecksum(conf), getStoreContext().getBlockSize(), BloomType.NONE, + isCompaction); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 816174739287..99880efece73 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -256,28 +256,26 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.storeContext = initializeStoreContext(family); // Assemble the store's home directory and Ensure it exists. - getRegionFileSystem().createStoreDir(family.getNameAsString()); + region.getRegionFileSystem().createStoreDir(family.getNameAsString()); // set block storage policy for store directory String policyName = family.getStoragePolicy(); if (null == policyName) { policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY); } - getRegionFileSystem().setStoragePolicy(getColumnFamilyName(), policyName.trim()); + region.getRegionFileSystem().setStoragePolicy(family.getNameAsString(), policyName.trim()); - this.dataBlockEncoder = new HFileDataBlockEncoderImpl(getColumnFamilyDescriptor() - .getDataBlockEncoding()); + this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding()); // used by ScanQueryMatcher long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); LOG.trace("Time to purge deletes set to {}ms in {}", timeToPurgeDeletes, this); // Get TTL - long ttl = determineTTLFromFamily(getColumnFamilyDescriptor()); + long ttl = determineTTLFromFamily(family); // Why not just pass a HColumnDescriptor in here altogether? Even if have // to clone it? - scanInfo = new ScanInfo(conf, getColumnFamilyDescriptor(), ttl, timeToPurgeDeletes, - getComparator()); + scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, region.getCellComparator()); this.memstore = getMemstore(); this.offPeakHours = OffPeakHours.getInstance(conf); @@ -294,7 +292,7 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER; } - this.storeEngine = createStoreEngine(this, this.conf, getComparator()); + this.storeEngine = createStoreEngine(this, this.conf, region.getCellComparator()); List hStoreFiles = loadStoreFiles(warmup); // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and @@ -409,7 +407,7 @@ protected CacheConfig createCacheConf(final ColumnFamilyDescriptor family) { */ protected StoreEngine createStoreEngine(HStore store, Configuration conf, CellComparator kvComparator) throws IOException { - return StoreEngine.create(store, conf, getComparator()); + return StoreEngine.create(store, conf, kvComparator); } /** @@ -434,11 +432,6 @@ StoreContext getStoreContext() { return storeContext; } - @Override - public int getBlockSize() { - return this.storeContext.getBlockSize(); - } - @Override public String getColumnFamilyName() { return this.storeContext.getFamily().getNameAsString(); @@ -1234,7 +1227,7 @@ private HFileContext createFileContext(Compression.Algorithm compression, .withCompressTags(family.isCompressTags()) .withChecksumType(StoreUtils.getChecksumType(conf)) .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(getBlockSize()) + .withBlockSize(family.getBlocksize()) .withHBaseCheckSum(true) .withDataBlockEncoding(family.getDataBlockEncoding()) .withEncryptionContext(encryptionContext) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 5414c5114608..6ec9c51930c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -46,8 +46,6 @@ public interface Store { int PRIORITY_USER = 1; int NO_PRIORITY = Integer.MIN_VALUE; - int getBlockSize(); - // General Accessors CellComparator getComparator();
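With the series complete, StoreContext is the immutable carrier of store metadata and the Store interface is back to its original surface. As a closing illustration, a sketch of how a consumer in the same package might read that metadata without reaching into HStore internals; the class and method names of the sketch are hypothetical, while the getters are the ones introduced above:

package org.apache.hadoop.hbase.regionserver;

import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.function.Supplier;
import org.apache.hadoop.hbase.io.crypto.Encryption;

// Hypothetical consumer; it lives in the regionserver package so the
// package-private HStore.getStoreContext() accessor is visible.
final class StoreContextConsumerSketch {
  static void describe(HStore store) {
    StoreContext ctx = store.getStoreContext();
    int blockSize = ctx.getBlockSize();                  // replaces the removed HStore.blocksize field
    Encryption.Context enc = ctx.getEncryptionContext(); // replaces HStore.getEncryptionContext()
    InetSocketAddress[] favored = ctx.getFavoredNodes(); // supplier hidden behind the getter
    Supplier<Collection<HStoreFile>> compacted = ctx.getCompactedFilesSupplier();
    BloomType bloom = ctx.getBloomFilterType();
    // A writer or reader can now be configured from these values alone,
    // without exposing the rest of the store.
  }
}

This is the shape HMobStore.createWriterInTmp takes in PATCH 10: the block size and encryption context come from getStoreContext() rather than from fields on the store itself.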