diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
index 22d3c9ce2c0b..5388a1105c33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
@@ -17,13 +17,9 @@
*/
package org.apache.hadoop.hbase.io;
-import java.io.BufferedInputStream;
import java.io.DataInput;
-import java.io.DataInputStream;
import java.io.IOException;
-import java.io.InputStream;
import java.util.Arrays;
-import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -144,36 +140,6 @@ public Path write(final FileSystem fs, final Path p) throws IOException {
return p;
}
- /**
- * Read a Reference from FileSystem.
- * @return New Reference made from passed p
- */
- public static Reference read(final FileSystem fs, final Path p) throws IOException {
- InputStream in = fs.open(p);
- try {
- // I need to be able to move back in the stream if this is not a pb serialization so I can
- // do the Writable decoding instead.
- in = in.markSupported() ? in : new BufferedInputStream(in);
- int pblen = ProtobufUtil.lengthOfPBMagic();
- in.mark(pblen);
- byte[] pbuf = new byte[pblen];
- IOUtils.readFully(in, pbuf, 0, pblen);
- // WATCHOUT! Return in middle of function!!!
- if (ProtobufUtil.isPBMagicPrefix(pbuf)) return convert(FSProtos.Reference.parseFrom(in));
- // Else presume Writables. Need to reset the stream since it didn't start w/ pb.
- // We won't bother rewriting the Reference as a pb since Reference is transitory.
- in.reset();
- Reference r = new Reference();
- DataInputStream dis = new DataInputStream(in);
- // Set in = dis so it gets the close below in the finally on our way out.
- in = dis;
- r.readFields(dis);
- return r;
- } finally {
- in.close();
- }
- }
-
public FSProtos.Reference convert() {
FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder();
builder.setRange(isTopFileRegion(getFileRegion())
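
With Reference.read() gone, the Writable fallback disappears too; a caller that still needs to materialize a Reference from the filesystem is left with the pb path only. A minimal sketch of that narrower read, reusing only helpers already visible in the deleted code (ProtobufUtil, FSProtos.Reference, convert); the name readPB and its fail-fast behavior on a non-pb file are assumptions, not code from this patch:

    // Hypothetical sketch, not part of this patch: pb-only Reference read,
    // failing fast instead of falling back to Writable decoding.
    public static Reference readPB(final FileSystem fs, final Path p) throws IOException {
      try (InputStream in = fs.open(p)) {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        byte[] pbuf = new byte[pblen];
        IOUtils.readFully(in, pbuf, 0, pblen);
        if (!ProtobufUtil.isPBMagicPrefix(pbuf)) {
          throw new IOException(p + " does not start with the pb magic prefix");
        }
        // The stream now sits just past the magic; the rest is the message.
        return convert(FSProtos.Reference.parseFrom(in));
      }
    }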
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 06c36853b675..a25ebcf12290 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -313,28 +313,6 @@ StoreFileInfo getStoreFileInfo(final String familyName, final String fileName,
familyName, new Path(familyDir, fileName), tracker);
}
- /**
- * Returns true if the specified family has reference files
- * @param familyName Column Family Name
- * @return true if family contains reference files
- */
- public boolean hasReferences(final String familyName) throws IOException {
- Path storeDir = getStoreDir(familyName);
- FileStatus[] files = CommonFSUtils.listStatus(fs, storeDir);
- if (files != null) {
- for (FileStatus stat : files) {
- if (stat.isDirectory()) {
- continue;
- }
- if (StoreFileInfo.isReference(stat.getPath())) {
- LOG.trace("Reference {}", stat.getPath());
- return true;
- }
- }
- }
- return false;
- }
-
/** Returns the set of families present on disk */
public Collection<String> getFamilies() throws IOException {
FileStatus[] fds =
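
The directory-listing probe removed above has a natural tracker-based equivalent: ask the StoreFileTracker for the store's files and test each one. A hedged sketch, assuming the caller has already built a tracker for the family (StoreFileTracker.load() and StoreFileInfo.isReference() are existing APIs; an overload taking a tracker is illustrative, not from this patch):

    // Hypothetical sketch, not part of this patch: reference check driven
    // by the tracker's view of the store instead of a raw directory listing.
    public boolean hasReferences(StoreFileTracker tracker) throws IOException {
      for (StoreFileInfo info : tracker.load()) {
        if (info.isReference()) {
          LOG.trace("Reference {}", info.getPath());
          return true;
        }
      }
      return false;
    }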
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index c7e9166b54a8..5d0509ac3d1e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -50,6 +50,8 @@
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -680,7 +682,9 @@ static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[],
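
The hunk body is truncated here, so the exact replacement is not visible; the two new imports suggest splitScan() now builds a per-family StoreFileTracker where it previously called the removed HRegionFileSystem.hasReferences(String). A speculative sketch of that shape; htd, regionFs, and the surrounding loop are assumed from RegionSplitter context, while StoreFileTrackerFactory.create(conf, td, cfd, regionFs) is the factory entry point in the imported package:

    // Speculative sketch, not the actual hunk: probe each family's tracked
    // files for references before treating a daughter region as cleaned up.
    boolean refFound = false;
    for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
      StoreFileTracker tracker =
        StoreFileTrackerFactory.create(connection.getConfiguration(), htd, cfd, regionFs);
      for (StoreFileInfo info : tracker.load()) {
        if (info.isReference()) {
          refFound = true;
          break;
        }
      }
      if (refFound) {
        break;
      }
    }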