diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterBrokenStoreFileCleaner.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterBrokenStoreFileCleaner.java
new file mode 100644
index 000000000000..038fada8b944
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterBrokenStoreFileCleaner.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * BrokenStoreFileCleaner metrics aggregated on the HBase Master from RegionServer reports.
+ */
+@InterfaceAudience.Private
+public interface MetricsMasterBrokenStoreFileCleaner {
+
+ /**
+ * The name of the metrics
+ */
+ String METRICS_NAME = "BrokenStoreFileCleaner";
+
+ /**
+ * The name of the metrics context that metrics will be under.
+ */
+ String METRICS_CONTEXT = "master";
+
+ /**
+ * Description
+ */
+ String METRICS_DESCRIPTION =
+ "Metrics about BrokenStoreFileCleaner results aggregated on the HBase Master";
+
+ /**
+ * The name of the metrics context that metrics will be under in jmx
+ */
+ String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
+
+
+ String DELETES = "BrokenStoreFileCleanerDeletes";
+ String DELETES_DESC = "Number of files deleted by BrokenStoreFileCleaner";
+ String FAILED_DELETES = "BrokenStoreFileCleanerFailedDeletes";
+ String FAILED_DELETES_DESC =
+ "Number of files BrokenStoreFileCleaner tried but failed to delete";
+ String RUNS = "BrokenStoreFileCleanerRuns";
+ String RUNS_DESC = "Number of times the BrokenStoreFileCleaner chore has run";
+ String RUNTIME = "BrokenStoreFileCleanerRuntime";
+ String RUNTIME_DESC = "Time required to run BrokenStoreFileCleaner chore in milliseconds";
+
+ /**
+ * Increment the deleted files counter
+ * @param deletes number of new files deleted
+ */
+ public void incrementBrokenStoreFileCleanerDeletes(long deletes);
+
+ /**
+ * Increment the failed file deletes counter
+ * @param failedDeletes number of files the chore failed to delete
+ */
+ public void incrementBrokenStoreFileCleanerFailedDeletes(long failedDeletes);
+
+ /**
+ * Increment the number of cleaner runs counter
+ * @param runs number of chore runs reported
+ */
+ public void incrementBrokenStoreFileCleanerRuns(long runs);
+
+ /**
+ * Update the chore runtime
+ * @param millis chore runtime in milliseconds
+ */
+ public void updateBrokenStoreFileCleanerTimer(long millis);
+
+}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterBrokenStoreFileCleanerImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterBrokenStoreFileCleanerImpl.java
new file mode 100644
index 000000000000..32ef24d335ed
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterBrokenStoreFileCleanerImpl.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.lib.MutableFastCounter;
+import org.apache.hadoop.metrics2.lib.MutableTimeHistogram;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Implementation of {@link MetricsMasterBrokenStoreFileCleaner} to track BrokenStoreFileCleaner
+ * metrics aggregated on the HBase Master.
+ */
+@InterfaceAudience.Private
+public class MetricsMasterBrokenStoreFileCleanerImpl extends BaseSourceImpl implements
+ MetricsMasterBrokenStoreFileCleaner {
+
+ private MutableFastCounter brokenStoreFileCleanerDeletes;
+ private MutableFastCounter brokenStoreFileCleanerFailedDeletes;
+ private MutableFastCounter brokenStoreFileCleanerRuns;
+ private MutableTimeHistogram brokenStoreFileCleanerTimer;
+
+ public MetricsMasterBrokenStoreFileCleanerImpl(String metricsName, String metricsDescription,
+ String metricsContext, String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+
+ brokenStoreFileCleanerDeletes =
+ getMetricsRegistry().newCounter(DELETES, DELETES_DESC, 0L);
+ brokenStoreFileCleanerFailedDeletes =
+ getMetricsRegistry().newCounter(FAILED_DELETES, FAILED_DELETES_DESC, 0L);
+ brokenStoreFileCleanerRuns = getMetricsRegistry().newCounter(RUNS, RUNS_DESC, 0L);
+ brokenStoreFileCleanerTimer = getMetricsRegistry().newTimeHistogram(RUNTIME, RUNTIME_DESC);
+ }
+
+ public MetricsMasterBrokenStoreFileCleanerImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+ }
+
+ @Override public void incrementBrokenStoreFileCleanerDeletes(long deletes) {
+ brokenStoreFileCleanerDeletes.incr(deletes);
+ }
+
+ @Override public void incrementBrokenStoreFileCleanerFailedDeletes(long failedDeletes) {
+ brokenStoreFileCleanerFailedDeletes.incr(failedDeletes);
+ }
+
+ @Override public void incrementBrokenStoreFileCleanerRuns(long runs) {
+ brokenStoreFileCleanerRuns.incr(runs);
+ }
+
+ @Override public void updateBrokenStoreFileCleanerTimer(long millis) {
+ brokenStoreFileCleanerTimer.add(millis);
+ }
+}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsBrokenStoreFileCleaner.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsBrokenStoreFileCleaner.java
new file mode 100644
index 000000000000..33f5a6219cc3
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsBrokenStoreFileCleaner.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * BrokenStoreFileCleaner metrics for a specific RegionServer.
+ */
+@InterfaceAudience.Private
+public interface MetricsBrokenStoreFileCleaner {
+
+ /**
+ * The name of the metrics
+ */
+ String METRICS_NAME = "BrokenStoreFileCleaner";
+
+ /**
+ * The name of the metrics context that metrics will be under.
+ */
+ String METRICS_CONTEXT = "regionserver";
+
+ /**
+ * Description
+ */
+ String METRICS_DESCRIPTION =
+ "Metrics about BrokenStoreFileCleaner results on a single HBase RegionServer";
+
+ /**
+ * The name of the metrics context that metrics will be under in jmx
+ */
+ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+
+ String DELETES = "BrokenStoreFileCleanerDeletes";
+ String DELETES_DESC = "Number of files deleted by BrokenStoreFileCleaner";
+ String FAILED_DELETES = "BrokenStoreFileCleanerFailedDeletes";
+ String FAILED_DELETES_DESC =
+ "Number of files BrokenStoreFileCleaner tried but failed to delete";
+ String RUNS = "BrokenStoreFileCleanerRuns";
+ String RUNS_DESC = "Number of times the BrokenStoreFileCleaner chore has run";
+ String RUNTIME = "BrokenStoreFileCleanerRuntime";
+ String RUNTIME_DESC = "Time required to run BrokenStoreFileCleaner chore in milliseconds";
+
+ /**
+ * Increment the deleted files counter
+ * @param deletes number of new files deleted
+ */
+ public void incrementBrokenStoreFileCleanerDeletes(long deletes);
+
+ /**
+ * Increment the failed file deletes counter
+ * @param failedDeletes number of files the chore failed to delete
+ */
+ public void incrementBrokenStoreFileCleanerFailedDeletes(long failedDeletes);
+
+ /**
+ * Increment the number of cleaner runs counter
+ */
+ public void incrementBrokenStoreFileCleanerRuns();
+
+ /**
+ * Update the chore runtime
+ * @param millis chore runtime in milliseconds
+ */
+ public void updateBrokenStoreFileCleanerTimer(long millis);
+
+}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsBrokenStoreFileCleanerImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsBrokenStoreFileCleanerImpl.java
new file mode 100644
index 000000000000..a75c5a5a6d44
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsBrokenStoreFileCleanerImpl.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.lib.MutableFastCounter;
+import org.apache.hadoop.metrics2.lib.MutableTimeHistogram;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Implementation of {@link MetricsBrokenStoreFileCleaner} to track metrics for a specific
+ * RegionServer.
+ */
+@InterfaceAudience.Private public class MetricsBrokenStoreFileCleanerImpl extends BaseSourceImpl
+ implements MetricsBrokenStoreFileCleaner {
+
+ private MutableFastCounter brokenStoreFileCleanerDeletes;
+ private MutableFastCounter brokenStoreFileCleanerFailedDeletes;
+ private MutableFastCounter brokenStoreFileCleanerRuns;
+ private MutableTimeHistogram brokenStoreFileCleanerTimer;
+
+ public MetricsBrokenStoreFileCleanerImpl(String metricsName, String metricsDescription,
+ String metricsContext, String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+
+ brokenStoreFileCleanerDeletes = getMetricsRegistry().newCounter(DELETES, DELETES_DESC, 0L);
+ brokenStoreFileCleanerFailedDeletes =
+ getMetricsRegistry().newCounter(FAILED_DELETES, FAILED_DELETES_DESC, 0L);
+ brokenStoreFileCleanerRuns = getMetricsRegistry().newCounter(RUNS, RUNS_DESC, 0L);
+ brokenStoreFileCleanerTimer = getMetricsRegistry().newTimeHistogram(RUNTIME, RUNTIME_DESC);
+ }
+
+ public MetricsBrokenStoreFileCleanerImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+ }
+
+ @Override public void incrementBrokenStoreFileCleanerDeletes(long deletes) {
+ brokenStoreFileCleanerDeletes.incr(deletes);
+ }
+
+ @Override public void incrementBrokenStoreFileCleanerFailedDeletes(long failedDeletes) {
+ brokenStoreFileCleanerFailedDeletes.incr(failedDeletes);
+ }
+
+ @Override public void incrementBrokenStoreFileCleanerRuns() {
+ brokenStoreFileCleanerRuns.incr();
+ }
+
+ @Override public void updateBrokenStoreFileCleanerTimer(long millis) {
+ brokenStoreFileCleanerTimer.add(millis);
+ }
+}
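A minimal usage sketch (not part of this patch; the timing and counter variables are hypothetical placeholders for values the RegionServer-side chore would compute):

  MetricsBrokenStoreFileCleanerImpl metrics = new MetricsBrokenStoreFileCleanerImpl();
  long start = EnvironmentEdgeManager.currentTime();
  // ... scan store directories and delete broken store files ...
  metrics.incrementBrokenStoreFileCleanerDeletes(deletedCount);      // files actually removed
  metrics.incrementBrokenStoreFileCleanerFailedDeletes(failedCount); // deletions that failed
  metrics.incrementBrokenStoreFileCleanerRuns();                     // one more chore run
  metrics.updateBrokenStoreFileCleanerTimer(EnvironmentEdgeManager.currentTime() - start);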
diff --git a/hbase-hadoop-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterBrokenStoreFileCleaner b/hbase-hadoop-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterBrokenStoreFileCleaner
new file mode 100644
index 000000000000..92b412b29958
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterBrokenStoreFileCleaner
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+org.apache.hadoop.hbase.master.MetricsMasterBrokenStoreFileCleanerImpl
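This ServiceLoader registration is what lets callers resolve the implementation through CompatibilitySingletonFactory without referencing the concrete class; a minimal sketch of the lookup, mirroring what MetricsMaster does later in this patch:

  // Hedged sketch: resolves MetricsMasterBrokenStoreFileCleanerImpl via the entry above.
  MetricsMasterBrokenStoreFileCleaner metrics =
    CompatibilitySingletonFactory.getInstance(MetricsMasterBrokenStoreFileCleaner.class);
  metrics.incrementBrokenStoreFileCleanerRuns(1);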
diff --git a/hbase-hadoop-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsBrokenStoreFileCleaner b/hbase-hadoop-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsBrokenStoreFileCleaner
new file mode 100644
index 000000000000..5fbf21dcb5e1
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsBrokenStoreFileCleaner
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+org.apache.hadoop.hbase.regionserver.MetricsBrokenStoreFileCleanerImpl
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto
index 4ec09991b343..554a58263481 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto
@@ -184,6 +184,18 @@ message GetLiveRegionServersResponse {
required uint32 total = 2;
}
+
+message BrokenStoreFileCleanerUsageRequest {
+ required string server_name = 1;
+ required uint64 runtime = 2;
+ optional uint64 deleted_files = 3;
+ optional uint64 failed_deletes = 4;
+ required uint64 runs = 5;
+}
+
+message BrokenStoreFileCleanerUsageResponse {
+}
+
service RegionServerStatusService {
/** Called when a region server first starts. */
rpc RegionServerStartup(RegionServerStartupRequest)
@@ -230,4 +242,8 @@ service RegionServerStatusService {
/** Get some live region servers to be used as seed for bootstrap nodes */
rpc GetLiveRegionServers(GetLiveRegionServersRequest)
returns(GetLiveRegionServersResponse);
+
+ /** Report BrokenStoreFileCleaner chore metrics to master */
+ rpc ReportBrokenStoreFileCleanerUsage(BrokenStoreFileCleanerUsageRequest)
+ returns(BrokenStoreFileCleanerUsageResponse);
}
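A hedged sketch of how a RegionServer might assemble the new request with the generated protobuf builder; the field values and variable names are placeholders, the actual reporting call site is not shown in this diff:

  BrokenStoreFileCleanerUsageRequest request = BrokenStoreFileCleanerUsageRequest.newBuilder()
    .setServerName(serverName.toString()) // required
    .setRuntime(choreRuntimeMillis)       // required, milliseconds
    .setDeletedFiles(deletedCount)        // optional
    .setFailedDeletes(failedCount)        // optional
    .setRuns(1)                           // required
    .build();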
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
new file mode 100644
index 000000000000..2a269ea4ac4e
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+// This file contains protocol buffers that are used for store file tracker.
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "StoreFileTrackerProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+message StoreFileEntry {
+ required string name = 1;
+ required uint64 size = 2;
+}
+
+message StoreFileList {
+ required uint64 timestamp = 1;
+ repeated StoreFileEntry store_file = 2;
+}
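A hedged sketch of round-tripping the new messages with the generated Java classes (file name and size are placeholder values):

  StoreFileList list = StoreFileList.newBuilder()
    .setTimestamp(EnvironmentEdgeManager.currentTime())
    .addStoreFile(StoreFileEntry.newBuilder().setName("0c7bc7bf3b2a4f8e").setSize(1024L).build())
    .build();
  byte[] bytes = list.toByteArray();
  // parseFrom throws InvalidProtocolBufferException on corrupt data
  StoreFileList reloaded = StoreFileList.parseFrom(bytes);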
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index 74836ce39c6b..fbed724a207e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -201,7 +201,6 @@ public static boolean isHFileLink(final Path path) {
return isHFileLink(path.getName());
}
-
/**
* @param fileName File name to check.
* @return True if the path is a HFileLink.
@@ -322,10 +321,10 @@ public static String createHFileLinkName(final TableName tableName,
* @param dstFamilyPath - Destination path (table/region/cf/)
* @param hfileRegionInfo - Linked HFile Region Info
* @param hfileName - Linked HFile name
- * @return true if the file is created, otherwise the file exists.
- * @throws IOException on file or parent directory creation failure
+ * @return the file link name.
+ * @throws IOException on file or parent directory creation failure.
*/
- public static boolean create(final Configuration conf, final FileSystem fs,
+ public static String create(final Configuration conf, final FileSystem fs,
final Path dstFamilyPath, final RegionInfo hfileRegionInfo,
final String hfileName) throws IOException {
return create(conf, fs, dstFamilyPath, hfileRegionInfo, hfileName, true);
@@ -343,10 +342,10 @@ public static boolean create(final Configuration conf, final FileSystem fs,
* @param hfileRegionInfo - Linked HFile Region Info
* @param hfileName - Linked HFile name
* @param createBackRef - Whether back reference should be created. Defaults to true.
- * @return true if the file is created, otherwise the file exists.
- * @throws IOException on file or parent directory creation failure
+ * @return the file link name.
+ * @throws IOException on file or parent directory creation failure.
*/
- public static boolean create(final Configuration conf, final FileSystem fs,
+ public static String create(final Configuration conf, final FileSystem fs,
final Path dstFamilyPath, final RegionInfo hfileRegionInfo,
final String hfileName, final boolean createBackRef) throws IOException {
TableName linkedTable = hfileRegionInfo.getTable();
@@ -366,17 +365,18 @@ public static boolean create(final Configuration conf, final FileSystem fs,
* @param linkedTable - Linked Table Name
* @param linkedRegion - Linked Region Name
* @param hfileName - Linked HFile name
- * @return true if the file is created, otherwise the file exists.
- * @throws IOException on file or parent directory creation failure
+ * @return the file link name.
+ * @throws IOException on file or parent directory creation failure.
*/
- public static boolean create(final Configuration conf, final FileSystem fs,
+ public static String create(final Configuration conf, final FileSystem fs,
final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion,
final String hfileName) throws IOException {
return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName, true);
}
/**
- * Create a new HFileLink
+ * Create a new HFileLink. In the event of link creation failure, this method throws an
+ * IOException, so that the calling upper layer can decide how to proceed.
*
* <p>It also adds a back-reference to the hfile back-reference directory
* to simplify the reference-count and the cleaning process.
@@ -388,10 +388,10 @@ public static boolean create(final Configuration conf, final FileSystem fs,
* @param linkedRegion - Linked Region Name
* @param hfileName - Linked HFile name
* @param createBackRef - Whether back reference should be created. Defaults to true.
- * @return true if the file is created, otherwise the file exists.
- * @throws IOException on file or parent directory creation failure
+ * @return the file link name.
+ * @throws IOException on file or parent directory creation failure.
*/
- public static boolean create(final Configuration conf, final FileSystem fs,
+ public static String create(final Configuration conf, final FileSystem fs,
final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion,
final String hfileName, final boolean createBackRef) throws IOException {
String familyName = dstFamilyPath.getName();
@@ -417,10 +417,10 @@ public static boolean create(final Configuration conf, final FileSystem fs,
* @param linkedRegion - Linked Region Name
* @param hfileName - Linked HFile name
* @param createBackRef - Whether back reference should be created. Defaults to true.
- * @return true if the file is created, otherwise the file exists.
+ * @return the file link name.
* @throws IOException on file or parent directory creation failure
*/
- public static boolean create(final Configuration conf, final FileSystem fs,
+ public static String create(final Configuration conf, final FileSystem fs,
final Path dstFamilyPath, final String familyName, final String dstTableName,
final String dstRegionName, final TableName linkedTable, final String linkedRegion,
final String hfileName, final boolean createBackRef) throws IOException {
@@ -444,7 +444,9 @@ public static boolean create(final Configuration conf, final FileSystem fs,
}
try {
// Create the link
- return fs.createNewFile(new Path(dstFamilyPath, name));
+ if (fs.createNewFile(new Path(dstFamilyPath, name))) {
+ return name;
+ }
} catch (IOException e) {
LOG.error("couldn't create the link=" + name + " for " + dstFamilyPath, e);
// Revert the reference if the link creation failed
@@ -453,25 +455,8 @@ public static boolean create(final Configuration conf, final FileSystem fs,
}
throw e;
}
- }
-
- /**
- * Create a new HFileLink starting from a hfileLink name
- *
- * <p>It also adds a back-reference to the hfile back-reference directory
- * to simplify the reference-count and the cleaning process.
- *
- * @param conf {@link Configuration} to read for the archive directory name
- * @param fs {@link FileSystem} on which to write the HFileLink
- * @param dstFamilyPath - Destination path (table/region/cf/)
- * @param hfileLinkName - HFileLink name (it contains hfile-region-table)
- * @return true if the file is created, otherwise the file exists.
- * @throws IOException on file or parent directory creation failure
- */
- public static boolean createFromHFileLink(final Configuration conf, final FileSystem fs,
- final Path dstFamilyPath, final String hfileLinkName)
- throws IOException {
- return createFromHFileLink(conf, fs, dstFamilyPath, hfileLinkName, true);
+ throw new IOException("File link=" + name + " already exists under " +
+ dstFamilyPath + " folder.");
}
/**
@@ -485,10 +470,10 @@ public static boolean createFromHFileLink(final Configuration conf, final FileSy
* @param dstFamilyPath - Destination path (table/region/cf/)
* @param hfileLinkName - HFileLink name (it contains hfile-region-table)
* @param createBackRef - Whether back reference should be created. Defaults to true.
- * @return true if the file is created, otherwise the file exists.
- * @throws IOException on file or parent directory creation failure
+ * @return the file link name.
+ * @throws IOException on file or parent directory creation failure.
*/
- public static boolean createFromHFileLink(final Configuration conf, final FileSystem fs,
+ public static String createFromHFileLink(final Configuration conf, final FileSystem fs,
final Path dstFamilyPath, final String hfileLinkName, final boolean createBackRef)
throws IOException {
Matcher m = LINK_NAME_PATTERN.matcher(hfileLinkName);
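With the changed contract, a hedged sketch of a caller (not taken from this patch): the link file name is returned on success, and an existing link now surfaces as an IOException rather than a false return.

  try {
    String linkName = HFileLink.create(conf, fs, dstFamilyPath, hfileRegionInfo, hfileName, true);
    LOG.debug("Created HFileLink {}", linkName);
  } catch (IOException e) {
    // creation failed or the link already exists; the calling layer decides how to proceed
    throw e;
  }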
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ad6969ba9abf..2dac3296bae8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -131,6 +131,7 @@
import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
@@ -376,6 +377,7 @@ public class HMaster extends HBaseServerBase implements Maste
private ReplicationBarrierCleaner replicationBarrierCleaner;
private MobFileCleanerChore mobFileCleanerChore;
private MobFileCompactionChore mobFileCompactionChore;
+ private RollingUpgradeChore rollingUpgradeChore;
// used to synchronize the mobCompactionStates
private final IdLock mobCompactionLock = new IdLock();
// save the information of mob compactions in tables.
@@ -1222,6 +1224,9 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc
LOG.debug("Balancer post startup initialization complete, took " + (
(EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
}
+
+ this.rollingUpgradeChore = new RollingUpgradeChore(this);
+ getChoreService().scheduleChore(rollingUpgradeChore);
}
private void createMissingCFsInMetaDuringUpgrade(
@@ -1713,6 +1718,7 @@ protected void stopChores() {
shutdownChore(snapshotCleanerChore);
shutdownChore(hbckChore);
shutdownChore(regionsRecoveryChore);
+ shutdownChore(rollingUpgradeChore);
}
/**
@@ -4026,4 +4032,12 @@ public List getMetaLocations() {
public Collection<ServerName> getLiveRegionServers() {
return regionServerTracker.getRegionServers();
}
+
+ public void reportBrokenStoreFileCleanerUsage(String serverName, long runtime,
+ long deletedFiles, long failedDeletes, long runs) {
+ getMasterMetrics().incrementBrokenStoreFileCleanerDeletes(deletedFiles);
+ getMasterMetrics().incrementBrokenStoreFileCleanerFailedDeletes(failedDeletes);
+ getMasterMetrics().incrementBrokenStoreFileCleanerRuns(runs);
+ getMasterMetrics().updateBrokenStoreFileCleanerTimer(runtime);
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 46bc8c2158eb..2ae643b8bbcb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -377,6 +377,8 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.UpdateRSGroupConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.BrokenStoreFileCleanerUsageResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
@@ -3492,4 +3494,13 @@ public GetLiveRegionServersResponse getLiveRegionServers(RpcController controlle
.forEach(builder::addServer);
return builder.build();
}
+
+ @Override
+ public BrokenStoreFileCleanerUsageResponse reportBrokenStoreFileCleanerUsage(
+ RpcController controller, BrokenStoreFileCleanerUsageRequest request)
+ throws ServiceException {
+ this.server.reportBrokenStoreFileCleanerUsage(request.getServerName(), request.getRuntime(),
+ request.getDeletedFiles(), request.getFailedDeletes(), request.getRuns());
+ return BrokenStoreFileCleanerUsageResponse.newBuilder().build();
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServicesVersionWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServicesVersionWrapper.java
index e3bf5de85d1f..ce1ae3241d5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServicesVersionWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServicesVersionWrapper.java
@@ -25,6 +25,8 @@
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.BrokenStoreFileCleanerUsageResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLiveRegionServersResponse;
@@ -116,4 +118,10 @@ public GetLiveRegionServersResponse getLiveRegionServers(RpcController controlle
GetLiveRegionServersRequest request) throws ServiceException {
return masterRpcServices.getLiveRegionServers(controller, request);
}
+
+ @Override
+ public BrokenStoreFileCleanerUsageResponse reportBrokenStoreFileCleanerUsage(
+ RpcController controller, BrokenStoreFileCleanerUsageRequest request) throws ServiceException {
+ return masterRpcServices.reportBrokenStoreFileCleanerUsage(controller, request);
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
index 53030c7609d0..cb0e5cea0f99 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
@@ -44,6 +44,7 @@ public class MetricsMaster {
private MetricsMasterQuotaSource masterQuotaSource;
private ProcedureMetrics serverCrashProcMetrics;
+ private MetricsMasterBrokenStoreFileCleaner brokenSFC;
public MetricsMaster(MetricsMasterWrapper masterWrapper) {
masterSource = CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class).create(masterWrapper);
@@ -53,6 +54,8 @@ public MetricsMaster(MetricsMasterWrapper masterWrapper) {
CompatibilitySingletonFactory.getInstance(MetricsMasterQuotaSourceFactory.class).create(masterWrapper);
serverCrashProcMetrics = convertToProcedureMetrics(masterSource.getServerCrashMetrics());
+ brokenSFC =
+ CompatibilitySingletonFactory.getInstance(MetricsMasterBrokenStoreFileCleaner.class);
}
// for unit-test usage
@@ -191,4 +194,20 @@ public void incrementSnapshotSizeComputationTime(final long executionTime) {
public void incrementSnapshotFetchTime(long executionTime) {
masterQuotaSource.incrementSnapshotObserverSnapshotFetchTime(executionTime);
}
+
+ public void incrementBrokenStoreFileCleanerDeletes(long deletes) {
+ brokenSFC.incrementBrokenStoreFileCleanerDeletes(deletes);
+ }
+
+ public void incrementBrokenStoreFileCleanerFailedDeletes(long failedDeletes) {
+ brokenSFC.incrementBrokenStoreFileCleanerFailedDeletes(failedDeletes);
+ }
+
+ public void incrementBrokenStoreFileCleanerRuns(long runs) {
+ brokenSFC.incrementBrokenStoreFileCleanerRuns(runs);
+ }
+
+ public void updateBrokenStoreFileCleanerTimer(long millis) {
+ brokenSFC.updateBrokenStoreFileCleanerTimer(millis);
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index da3d73ea852d..0f41db5cad88 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -24,7 +24,6 @@
import java.util.Collections;
import java.util.List;
import java.util.stream.Stream;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -56,6 +55,8 @@
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.StoreUtils;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.wal.WALSplitUtil;
@@ -587,30 +588,33 @@ private void createMergedRegion(final MasterProcedureEnv env) throws IOException
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable());
final FileSystem fs = mfs.getFileSystem();
-
+ List<Path> mergedFiles = new ArrayList<>();
HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(
env.getMasterConfiguration(), fs, tableDir, mergedRegion);
for (RegionInfo ri: this.regionsToMerge) {
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
env.getMasterConfiguration(), fs, tableDir, ri, false);
- mergeStoreFiles(env, regionFs, mergeRegionFs, mergedRegion);
+ mergedFiles.addAll(mergeStoreFiles(env, regionFs, mergeRegionFs, mergedRegion));
}
assert mergeRegionFs != null;
- mergeRegionFs.commitMergedRegion();
+ mergeRegionFs.commitMergedRegion(mergedFiles, env);
// Prepare to create merged regions
env.getAssignmentManager().getRegionStates().
getOrCreateRegionStateNode(mergedRegion).setState(State.MERGING_NEW);
}
- private void mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem regionFs,
+ private List<Path> mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem regionFs,
HRegionFileSystem mergeRegionFs, RegionInfo mergedRegion) throws IOException {
final TableDescriptor htd = env.getMasterServices().getTableDescriptors()
.get(mergedRegion.getTable());
+ List<Path> mergedFiles = new ArrayList<>();
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
String family = hcd.getNameAsString();
- final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
+ StoreFileTracker tracker =
+ StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, hcd, regionFs);
+ final Collection<StoreFileInfo> storeFiles = tracker.load();
if (storeFiles != null && storeFiles.size() > 0) {
final Configuration storeConfiguration =
StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd);
@@ -622,11 +626,13 @@ private void mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem regionFs,
// is running in a regionserver's Store context, or we might not be able
// to read the hfiles.
storeFileInfo.setConf(storeConfiguration);
- mergeRegionFs.mergeStoreFile(regionFs.getRegionInfo(), family,
+ Path refFile = mergeRegionFs.mergeStoreFile(regionFs.getRegionInfo(), family,
new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED));
+ mergedFiles.add(refFile);
}
}
}
+ return mergedFiles;
}
/**
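Both merge and split now resolve a column family's store files through the configured tracker rather than listing the family directory; a minimal sketch of that lookup, reusing the same calls as above (variable names follow the surrounding code):

  StoreFileTracker tracker =
    StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, hcd, regionFs);
  Collection<StoreFileInfo> storeFiles = tracker.load(); // may consult the tracker's backing store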
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index fbd87290d8c2..effdba4f2012 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -33,7 +33,6 @@
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -66,6 +65,8 @@
import org.apache.hadoop.hbase.regionserver.RegionSplitRestriction;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.StoreUtils;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -621,21 +622,20 @@ public void createDaughterRegions(final MasterProcedureEnv env) throws IOExcepti
final FileSystem fs = mfs.getFileSystem();
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
env.getMasterConfiguration(), fs, tabledir, getParentRegion(), false);
-
regionFs.createSplitsDir(daughterOneRI, daughterTwoRI);
- Pair<Integer, Integer> expectedReferences = splitStoreFiles(env, regionFs);
+ Pair<List<Path>, List<Path>> expectedReferences = splitStoreFiles(env, regionFs);
- assertSplitResultFilesCount(fs, expectedReferences.getFirst(),
+ assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(),
regionFs.getSplitsDir(daughterOneRI));
- regionFs.commitDaughterRegion(daughterOneRI);
- assertSplitResultFilesCount(fs, expectedReferences.getFirst(),
+ regionFs.commitDaughterRegion(daughterOneRI, expectedReferences.getFirst(), env);
+ assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(),
new Path(tabledir, daughterOneRI.getEncodedName()));
- assertSplitResultFilesCount(fs, expectedReferences.getSecond(),
+ assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(),
regionFs.getSplitsDir(daughterTwoRI));
- regionFs.commitDaughterRegion(daughterTwoRI);
- assertSplitResultFilesCount(fs, expectedReferences.getSecond(),
+ regionFs.commitDaughterRegion(daughterTwoRI, expectedReferences.getSecond(), env);
+ assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(),
new Path(tabledir, daughterTwoRI.getEncodedName()));
}
@@ -652,7 +652,7 @@ private void deleteDaughterRegions(final MasterProcedureEnv env) throws IOExcept
* Create Split directory
* @param env MasterProcedureEnv
*/
- private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env,
+ private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv env,
final HRegionFileSystem regionFs) throws IOException {
final Configuration conf = env.getMasterConfiguration();
TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
@@ -668,7 +668,9 @@ private Pair splitStoreFiles(final MasterProcedureEnv env,
new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
String family = cfd.getNameAsString();
- Collection<StoreFileInfo> sfis = regionFs.getStoreFiles(family);
+ StoreFileTracker tracker =
+ StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, regionFs);
+ Collection<StoreFileInfo> sfis = tracker.load();
if (sfis == null) {
continue;
}
@@ -694,7 +696,7 @@ private Pair splitStoreFiles(final MasterProcedureEnv env,
}
if (nbFiles == 0) {
// no file needs to be splitted.
- return new Pair<Integer, Integer>(0, 0);
+ return new Pair<>(Collections.emptyList(), Collections.emptyList());
}
// Max #threads is the smaller of the number of storefiles or the default max determined above.
int maxThreads = Math.min(
@@ -752,14 +754,18 @@ private Pair splitStoreFiles(final MasterProcedureEnv env,
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
- int daughterA = 0;
- int daughterB = 0;
+ List<Path> daughterA = new ArrayList<>();
+ List<Path> daughterB = new ArrayList<>();
// Look for any exception
for (Future<Pair<Path, Path>> future : futures) {
try {
Pair<Path, Path> p = future.get();
- daughterA += p.getFirst() != null ? 1 : 0;
- daughterB += p.getSecond() != null ? 1 : 0;
+ if (p.getFirst() != null) {
+ daughterA.add(p.getFirst());
+ }
+ if (p.getSecond() != null) {
+ daughterB.add(p.getSecond());
+ }
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
} catch (ExecutionException e) {
@@ -772,7 +778,7 @@ private Pair splitStoreFiles(final MasterProcedureEnv env,
getParentRegion().getShortNameToLog() + " Daughter A: " + daughterA +
" storefiles, Daughter B: " + daughterB + " storefiles.");
}
- return new Pair<Integer, Integer>(daughterA, daughterB);
+ return new Pair<>(daughterA, daughterB);
}
private void assertSplitResultFilesCount(final FileSystem fs,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/migrate/RollingUpgradeChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/migrate/RollingUpgradeChore.java
new file mode 100644
index 000000000000..3896b41f6625
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/migrate/RollingUpgradeChore.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.migrate;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.MigrateStoreFileTrackerProcedure;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * To avoid submitting too many migration/upgrade tasks at the same time during master
+ * initialization, RollingUpgradeChore handles all rolling-upgrade tasks.
+ */
+@InterfaceAudience.Private
+public class RollingUpgradeChore extends ScheduledChore {
+
+ static final String ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY =
+ "hbase.master.rolling.upgrade.chore.period.secs";
+ static final int DEFAULT_ROLLING_UPGRADE_CHORE_PERIOD_SECONDS = 10; // 10 seconds by default
+
+ static final String ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY =
+ "hbase.master.rolling.upgrade.chore.delay.secs";
+ static final long DEFAULT_ROLLING_UPGRADE_CHORE_DELAY_SECONDS = 30; // 30 seconds
+
+ static final int CONCURRENT_PROCEDURES_COUNT = 5;
+
+ private final static Logger LOG = LoggerFactory.getLogger(RollingUpgradeChore.class);
+ ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
+ private TableDescriptors tableDescriptors;
+ private List<MigrateStoreFileTrackerProcedure> processingProcs = new ArrayList<>();
+
+ public RollingUpgradeChore(MasterServices masterServices) {
+ this(masterServices.getConfiguration(), masterServices.getMasterProcedureExecutor(),
+ masterServices.getTableDescriptors(), masterServices);
+ }
+
+ private RollingUpgradeChore(Configuration conf,
+ ProcedureExecutor<MasterProcedureEnv> procedureExecutor, TableDescriptors tableDescriptors,
+ Stoppable stopper) {
+ super(RollingUpgradeChore.class.getSimpleName(), stopper, conf
+ .getInt(ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY,
+ DEFAULT_ROLLING_UPGRADE_CHORE_PERIOD_SECONDS), conf
+ .getLong(ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY,
+ DEFAULT_ROLLING_UPGRADE_CHORE_DELAY_SECONDS),
+ TimeUnit.SECONDS);
+ this.procedureExecutor = procedureExecutor;
+ this.tableDescriptors = tableDescriptors;
+ }
+
+ @Override
+ protected void chore() {
+ if (isCompletelyMigrateSFT(CONCURRENT_PROCEDURES_COUNT)) {
+ LOG.info("All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!");
+ shutdown();
+ }
+ }
+
+ private boolean isCompletelyMigrateSFT(int concurrentCount) {
+ Iterator<MigrateStoreFileTrackerProcedure> iter = processingProcs.iterator();
+ while (iter.hasNext()) {
+ MigrateStoreFileTrackerProcedure proc = iter.next();
+ if (procedureExecutor.isFinished(proc.getProcId())) {
+ iter.remove();
+ }
+ }
+ // No new migration procedures will be submitted until
+ // all procedures executed last time are completed.
+ if (!processingProcs.isEmpty()) {
+ return false;
+ }
+
+ Map<String, TableDescriptor> migrateSFTTables;
+ try {
+ migrateSFTTables = tableDescriptors.getAll().entrySet().stream().filter(entry -> {
+ TableDescriptor td = entry.getValue();
+ return StringUtils.isEmpty(td.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
+ }).limit(concurrentCount).collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
+ } catch (IOException e) {
+ LOG.warn("Failed to migrate StoreFileTracker", e);
+ return false;
+ }
+
+ if (migrateSFTTables.isEmpty()) {
+ LOG.info("There is no table to migrate StoreFileTracker!");
+ return true;
+ }
+
+ for (Map.Entry<String, TableDescriptor> entry : migrateSFTTables.entrySet()) {
+ TableDescriptor tableDescriptor = entry.getValue();
+ MigrateStoreFileTrackerProcedure proc =
+ new MigrateStoreFileTrackerProcedure(procedureExecutor.getEnvironment(), tableDescriptor);
+ procedureExecutor.submitProcedure(proc);
+ processingProcs.add(proc);
+ }
+ return false;
+ }
+}
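A hedged sketch of tuning the chore through the two configuration keys defined above (the values shown are arbitrary examples, not defaults from this patch):

  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.master.rolling.upgrade.chore.period.secs", 30);
  conf.setLong("hbase.master.rolling.upgrade.chore.delay.secs", 60L);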
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 8157af99ba4b..7157fbf04d40 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -41,7 +41,6 @@
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateHdfsRegions;
-import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
@@ -459,56 +458,25 @@ private List createFsLayout(
List<RegionInfo> newRegions,
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
- final Path tempdir = mfs.getTempDir();
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
- final Path tempTableDir = CommonFSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
- if (CommonFSUtils.isExists(mfs.getFileSystem(), tempTableDir)) {
+ final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(),
+ tableDescriptor.getTableName());
+ if (CommonFSUtils.isExists(mfs.getFileSystem(), tableDir)) {
// if the region dirs exist, will cause exception and unlimited retry (see HBASE-24546)
- LOG.warn("temp table dir already exists on disk: {}, will be deleted.", tempTableDir);
- CommonFSUtils.deleteDirectory(mfs.getFileSystem(), tempTableDir);
+ LOG.warn("table dir already exists on disk: {}, will be deleted.", tableDir);
+ CommonFSUtils.deleteDirectory(mfs.getFileSystem(), tableDir);
}
- ((FSTableDescriptors) (env.getMasterServices().getTableDescriptors()))
- .createTableDescriptorForTableDirectory(tempTableDir,
- TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);
+ ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
+ .createTableDescriptorForTableDirectory(tableDir,
+ TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(
- env, tempdir, tableDescriptor.getTableName(), newRegions);
-
- // 3. Move Table temp directory to the hbase root location
- CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
- // Move Table temp mob directory to the hbase root location
- Path tempMobTableDir = MobUtils.getMobTableDir(tempdir, tableDescriptor.getTableName());
- if (mfs.getFileSystem().exists(tempMobTableDir)) {
- moveTempMobDirectoryToHBaseRoot(mfs, tableDescriptor, tempMobTableDir);
- }
- return newRegions;
- }
+ env, mfs.getRootDir(), tableDescriptor.getTableName(), newRegions);
- /**
- * Move table temp mob directory to the hbase root location
- * @param mfs The master file system
- * @param tableDescriptor The table to operate on
- * @param tempMobTableDir The temp mob directory of table
- * @throws IOException If failed to move temp mob dir to hbase root dir
- */
- private void moveTempMobDirectoryToHBaseRoot(final MasterFileSystem mfs,
- final TableDescriptor tableDescriptor, final Path tempMobTableDir) throws IOException {
- FileSystem fs = mfs.getFileSystem();
- final Path tableMobDir =
- MobUtils.getMobTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
- if (!fs.delete(tableMobDir, true) && fs.exists(tableMobDir)) {
- throw new IOException("Couldn't delete mob table " + tableMobDir);
- }
- if (!fs.exists(tableMobDir.getParent())) {
- fs.mkdirs(tableMobDir.getParent());
- }
- if (!fs.rename(tempMobTableDir, tableMobDir)) {
- throw new IOException("Unable to move mob table from temp=" + tempMobTableDir
- + " to hbase root=" + tableMobDir);
- }
+ return newRegions;
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 2313e70f75bb..d77b95f186d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.hbase.master.procedure;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
@@ -37,6 +37,7 @@
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -274,17 +275,22 @@ private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
}
+ // check for store file tracker configurations
+ StoreFileTrackerFactory.checkForCreateTable(env.getMasterConfiguration(), tableDescriptor);
+
return true;
}
private void preCreate(final MasterProcedureEnv env)
throws IOException, InterruptedException {
if (!getTableName().isSystemTable()) {
- ProcedureSyncWait.getMasterQuotaManager(env)
- .checkNamespaceTableAndRegionQuota(
- getTableName(), (newRegions != null ? newRegions.size() : 0));
+ ProcedureSyncWait.getMasterQuotaManager(env).checkNamespaceTableAndRegionQuota(getTableName(),
+ (newRegions != null ? newRegions.size() : 0));
}
+ tableDescriptor = StoreFileTrackerFactory.updateWithTrackerConfigs(env.getMasterConfiguration(),
+ tableDescriptor);
+
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
final RegionInfo[] regions = newRegions == null ? null :
@@ -329,41 +335,22 @@ protected static List createFsLayout(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, List<RegionInfo> newRegions,
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
- final Path tempdir = mfs.getTempDir();
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
- final Path tempTableDir = CommonFSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
+ final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(),
+ tableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
- .createTableDescriptorForTableDirectory(tempTableDir, tableDescriptor, false);
+ .createTableDescriptorForTableDirectory(
+ tableDir, tableDescriptor, false);
// 2. Create Regions
- newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
+ newRegions = hdfsRegionHandler.createHdfsRegions(env, mfs.getRootDir(),
tableDescriptor.getTableName(), newRegions);
- // 3. Move Table temp directory to the hbase root location
- moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
-
return newRegions;
}
- protected static void moveTempDirectoryToHBaseRoot(
- final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor,
- final Path tempTableDir) throws IOException {
- final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
- final Path tableDir =
- CommonFSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
- FileSystem fs = mfs.getFileSystem();
- if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
- throw new IOException("Couldn't delete " + tableDir);
- }
- if (!fs.rename(tempTableDir, tableDir)) {
- throw new IOException("Unable to move table from temp=" + tempTableDir +
- " to hbase root=" + tableDir);
- }
- }
-
protected static List<RegionInfo> addTableToMeta(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final List<RegionInfo> regions) throws IOException {
assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 80dddc7ccda1..297efc240fca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -20,10 +20,8 @@
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -277,82 +275,49 @@ protected static void deleteFromFs(final MasterProcedureEnv env,
final boolean archive) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final FileSystem fs = mfs.getFileSystem();
- final Path tempdir = mfs.getTempDir();
final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), tableName);
- final Path tempTableDir = CommonFSUtils.getTableDir(tempdir, tableName);
if (fs.exists(tableDir)) {
- // Ensure temp exists
- if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
- throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
- }
-
- // Ensure parent exists
- if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
- throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
- }
-
- if (fs.exists(tempTableDir)) {
- // TODO
- // what's in this dir? something old? probably something manual from the user...
- // let's get rid of this stuff...
- FileStatus[] files = fs.listStatus(tempTableDir);
- if (files != null && files.length > 0) {
- List<Path> regionDirList = Arrays.stream(files)
- .filter(FileStatus::isDirectory)
- .map(FileStatus::getPath)
- .collect(Collectors.toList());
- HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(),
- tempTableDir, regionDirList);
+ // Archive regions from FS (the table directory)
+ if (archive) {
+ List<Path> regionDirList = regions.stream().filter(RegionReplicaUtil::isDefaultReplica)
+ .map(region ->
+ FSUtils.getRegionDirFromTableDir(tableDir, region)).collect(Collectors.toList());
+ HFileArchiver
+ .archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tableDir,
+ regionDirList);
+ if (!regionDirList.isEmpty()) {
+ LOG.debug("Archived {} regions", tableName);
}
- fs.delete(tempTableDir, true);
}
- // Move the table in /hbase/.tmp
- if (!fs.rename(tableDir, tempTableDir)) {
- throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
+ // Archive mob data
+ Path mobTableDir =
+ CommonFSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), tableName);
+ Path regionDir = new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
+ if (fs.exists(regionDir)) {
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
}
- }
- // Archive regions from FS (temp directory)
- if (archive) {
- List<Path> regionDirList = regions.stream().filter(RegionReplicaUtil::isDefaultReplica)
- .map(region -> FSUtils.getRegionDirFromTableDir(tempTableDir, region))
- .collect(Collectors.toList());
- HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tempTableDir,
- regionDirList);
- if (!regionDirList.isEmpty()) {
- LOG.debug("Archived {} regions", tableName);
+ // Delete table directory from FS
+ if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
+ throw new IOException("Couldn't delete " + tableDir);
}
- }
- // Archive mob data
- Path mobTableDir =
- CommonFSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), tableName);
- Path regionDir =
- new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
- if (fs.exists(regionDir)) {
- HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
- }
-
- // Delete table directory from FS (temp directory)
- if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
- throw new IOException("Couldn't delete " + tempTableDir);
- }
-
- // Delete the table directory where the mob files are saved
- if (mobTableDir != null && fs.exists(mobTableDir)) {
- if (!fs.delete(mobTableDir, true)) {
- throw new IOException("Couldn't delete mob dir " + mobTableDir);
+ // Delete the table directory where the mob files are saved
+ if (mobTableDir != null && fs.exists(mobTableDir)) {
+ if (!fs.delete(mobTableDir, true)) {
+ throw new IOException("Couldn't delete mob dir " + mobTableDir);
+ }
}
- }
- // Delete the directory on wal filesystem
- FileSystem walFs = mfs.getWALFileSystem();
- Path tableWALDir = CommonFSUtils.getWALTableDir(env.getMasterConfiguration(), tableName);
- if (walFs.exists(tableWALDir) && !walFs.delete(tableWALDir, true)) {
- throw new IOException("Couldn't delete table dir on wal filesystem" + tableWALDir);
+ // Delete the directory on wal filesystem
+ FileSystem walFs = mfs.getWALFileSystem();
+ Path tableWALDir = CommonFSUtils.getWALTableDir(env.getMasterConfiguration(), tableName);
+ if (walFs.exists(tableWALDir) && !walFs.delete(tableWALDir, true)) {
+ throw new IOException("Couldn't delete table dir on wal filesystem" + tableWALDir);
+ }
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 247dd9c202f4..1640644328f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.util.Bytes;
@@ -325,6 +326,10 @@ private void prepareModify(final MasterProcedureEnv env) throws IOException {
modifiedTableDescriptor.getRegionServerGroup(), forWhom);
MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
}
+
+ // check for store file tracker configurations
+ StoreFileTrackerFactory.checkForModifyTable(env.getMasterConfiguration(),
+ unmodifiedTableDescriptor, modifiedTableDescriptor);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index c45fdff7ca16..15f0a73a9df9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -29,7 +29,6 @@
import java.util.List;
import java.util.Map.Entry;
import java.util.Optional;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -39,7 +38,6 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.CellSink;
import org.apache.hadoop.hbase.regionserver.HMobStore;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
@@ -144,17 +142,16 @@ public InternalScanner createScanner(ScanInfo scanInfo, List s
};
private final CellSinkFactory<StoreFileWriter> writerFactory =
- new CellSinkFactory<StoreFileWriter>() {
- @Override
- public StoreFileWriter createWriter(InternalScanner scanner,
- org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
- boolean shouldDropBehind, boolean major) throws IOException {
- // make this writer with tags always because of possible new cells with tags.
- return store.createWriterInTmp(fd.maxKeyCount,
- major ? majorCompactionCompression : minorCompactionCompression,
- true, true, true, shouldDropBehind);
- }
- };
+ new CellSinkFactory<StoreFileWriter>() {
+ @Override
+ public StoreFileWriter createWriter(InternalScanner scanner,
+ org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
+ boolean shouldDropBehind, boolean major) throws IOException {
+ // make this writer with tags always because of possible new cells with tags.
+ return store.getStoreEngine().createWriter(
+ createParams(fd, shouldDropBehind, major).includeMVCCReadpoint(true).includesTag(true));
+ }
+ };
public DefaultMobStoreCompactor(Configuration conf, HStore store) {
super(conf, store);
@@ -288,7 +285,6 @@ private void calculateMobLengthMap(SetMultimap mobRefs) throw
*
* @param fd File details
* @param scanner Where to read from.
- * @param writer Where to write to.
* @param smallestReadPoint Smallest read point.
* @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= smallestReadPoint
* @param throughputController The compaction throughput controller.
@@ -297,7 +293,7 @@ private void calculateMobLengthMap(SetMultimap mobRefs) throw
* @return Whether compaction ended; false if it was interrupted for any reason.
*/
@Override
- protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer,
+ protected boolean performCompaction(FileDetails fd, InternalScanner scanner,
long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController,
boolean major, int numofFilesToCompact) throws IOException {
long bytesWrittenProgressForLog = 0;
@@ -667,7 +663,7 @@ private void commitOrAbortMobWriter(StoreFileWriter mobFileWriter, long maxSeqId
@Override
- protected List<Path> commitWriter(StoreFileWriter writer, FileDetails fd,
+ protected List<Path> commitWriter(FileDetails fd,
CompactionRequestImpl request) throws IOException {
List newFiles = Lists.newArrayList(writer.getPath());
writer.appendMetadata(fd.maxSeqId, request.isAllFiles(), request.getFiles());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index 480b85c58dfe..4a1dc7b33a51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -25,7 +25,6 @@
import java.util.HashSet;
import java.util.List;
import java.util.Set;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
@@ -127,8 +126,7 @@ public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
synchronized (flushLock) {
status.setStatus("Flushing " + store + ": creating writer");
// Write the map out to the disk
- writer = store.createWriterInTmp(cellsCount, store.getColumnFamilyDescriptor().getCompressionType(),
- false, true, true, false);
+ writer = createWriter(snapshot, true);
IOException e = null;
try {
// It's a mob store, flush the cells in a mob way. This is the difference of flushing
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
index f250304952a3..82c3867c103c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
@@ -110,7 +110,11 @@ public List abortWriters() {
return paths;
}
- protected abstract Collection<StoreFileWriter> writers();
+ /**
+ * Returns all writers. This is used to prevent deleting currently written storefiles
+ * during cleanup.
+ */
+ public abstract Collection<StoreFileWriter> writers();
/**
* Subclasses override this method to be called at the end of a successful sequence of append; all
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BrokenStoreFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BrokenStoreFileCleaner.java
new file mode 100644
index 000000000000..32bb0bd8c9b5
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BrokenStoreFileCleaner.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This Chore, every time it runs, will clear the unused HFiles in the data
+ * folder.
+ */
+@InterfaceAudience.Private
+public class BrokenStoreFileCleaner extends ScheduledChore {
+ private static final Logger LOG = LoggerFactory.getLogger(BrokenStoreFileCleaner.class);
+ public static final String BROKEN_STOREFILE_CLEANER_ENABLED =
+ "hbase.region.broken.storefilecleaner.enabled";
+ public static final boolean DEFAULT_BROKEN_STOREFILE_CLEANER_ENABLED = false;
+ public static final String BROKEN_STOREFILE_CLEANER_TTL =
+ "hbase.region.broken.storefilecleaner.ttl";
+ public static final long DEFAULT_BROKEN_STOREFILE_CLEANER_TTL = 1000 * 60 * 60 * 12; //12h
+ public static final String BROKEN_STOREFILE_CLEANER_DELAY =
+ "hbase.region.broken.storefilecleaner.delay";
+ public static final int DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY = 1000 * 60 * 60 * 2; //2h
+ public static final String BROKEN_STOREFILE_CLEANER_DELAY_JITTER =
+ "hbase.region.broken.storefilecleaner.delay.jitter";
+ public static final double DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY_JITTER = 0.25D;
+ public static final String BROKEN_STOREFILE_CLEANER_PERIOD =
+ "hbase.region.broken.storefilecleaner.period";
+ public static final int DEFAULT_BROKEN_STOREFILE_CLEANER_PERIOD = 1000 * 60 * 60 * 6; //6h
+
+ private HRegionServer regionServer;
+ private final AtomicBoolean enabled = new AtomicBoolean(true);
+ private long fileTtl;
+
+ public BrokenStoreFileCleaner(final int delay, final int period, final Stoppable stopper,
+ Configuration conf, HRegionServer regionServer) {
+ super("BrokenStoreFileCleaner", stopper, period, delay);
+ this.regionServer = regionServer;
+ setEnabled(
+ conf.getBoolean(BROKEN_STOREFILE_CLEANER_ENABLED, DEFAULT_BROKEN_STOREFILE_CLEANER_ENABLED));
+ fileTtl = conf.getLong(BROKEN_STOREFILE_CLEANER_TTL, DEFAULT_BROKEN_STOREFILE_CLEANER_TTL);
+ }
+
+ public boolean setEnabled(final boolean enabled) {
+ return this.enabled.getAndSet(enabled);
+ }
+
+ public boolean getEnabled() {
+ return this.enabled.get();
+ }
+
+ @Override
+ public void chore() {
+ if (getEnabled()) {
+ long start = EnvironmentEdgeManager.currentTime();
+ AtomicLong deletedFiles = new AtomicLong(0);
+ AtomicLong failedDeletes = new AtomicLong(0);
+ for (HRegion region : regionServer.getRegions()) {
+ for (HStore store : region.getStores()) {
+ //only do cleanup in stores not using tmp directories
+ if (store.getStoreEngine().requireWritingToTmpDirFirst()) {
+ continue;
+ }
+ Path storePath =
+ new Path(region.getRegionFileSystem().getRegionDir(), store.getColumnFamilyName());
+
+ try {
+ List<FileStatus> fsStoreFiles =
+ Arrays.asList(region.getRegionFileSystem().fs.listStatus(storePath));
+ fsStoreFiles.forEach(
+ file -> cleanFileIfNeeded(file, store, deletedFiles, failedDeletes));
+ } catch (IOException e) {
+ LOG.warn("Failed to list files in {}, cleanup is skipped there",storePath);
+ continue;
+ }
+ }
+ }
+ LOG.debug(
+ "BrokenStoreFileCleaner on {} run for: {}ms. It deleted {} files and tried but failed "
+ + "to delete {}",
+ regionServer.getServerName().getServerName(), EnvironmentEdgeManager.currentTime() - start,
+ deletedFiles.get(), failedDeletes.get());
+ logCleanupMetrics(EnvironmentEdgeManager.currentTime() - start, deletedFiles.get(),
+ failedDeletes.get());
+ } else {
+ LOG.trace("Broken storefile Cleaner chore disabled! Not cleaning.");
+ }
+ }
+
+ private void cleanFileIfNeeded(FileStatus file, HStore store,
+ AtomicLong deletedFiles, AtomicLong failedDeletes) {
+ if(file.isDirectory()){
+ LOG.trace("This is a Directory {}, skip cleanup", file.getPath());
+ return;
+ }
+
+ if(!validate(file.getPath())){
+ LOG.trace("Invalid file {}, skip cleanup", file.getPath());
+ return;
+ }
+
+ if(!isOldEnough(file)){
+ LOG.trace("Fresh file {}, skip cleanup", file.getPath());
+ return;
+ }
+
+ if(isActiveStorefile(file, store)){
+ LOG.trace("Actively used storefile file {}, skip cleanup", file.getPath());
+ return;
+ }
+
+ // Compacted files can still have readers and are cleaned by a separate chore, so they have to
+ // be skipped here
+ if(isCompactedFile(file, store)){
+ LOG.trace("Cleanup is done by a different chore for file {}, skip cleanup", file.getPath());
+ return;
+ }
+
+ if(isCompactionResultFile(file, store)){
+ LOG.trace("The file is the result of an ongoing compaction {}, skip cleanup", file.getPath());
+ return;
+ }
+
+ deleteFile(file, store, deletedFiles, failedDeletes);
+ }
+
+ private boolean isCompactionResultFile(FileStatus file, HStore store) {
+ return store.getStoreEngine().getCompactor().getCompactionTargets().contains(file.getPath());
+ }
+
+ // Compacted files can still have readers and are cleaned by a separate chore, so they have to
+ // be skipped here
+ private boolean isCompactedFile(FileStatus file, HStore store) {
+ return store.getStoreEngine().getStoreFileManager().getCompactedfiles().stream()
+ .anyMatch(sf -> sf.getPath().equals(file.getPath()));
+ }
+
+ private boolean isActiveStorefile(FileStatus file, HStore store) {
+ return store.getStoreEngine().getStoreFileManager().getStorefiles().stream()
+ .anyMatch(sf -> sf.getPath().equals(file.getPath()));
+ }
+
+ boolean validate(Path file) {
+ if (HFileLink.isBackReferencesDir(file) || HFileLink.isBackReferencesDir(file.getParent())) {
+ return true;
+ }
+ return StoreFileInfo.validateStoreFileName(file.getName());
+ }
+
+ boolean isOldEnough(FileStatus file){
+ return file.getModificationTime() + fileTtl < EnvironmentEdgeManager.currentTime();
+ }
+
+ private void deleteFile(FileStatus file, HStore store, AtomicLong deletedFiles,
+ AtomicLong failedDeletes) {
+ Path filePath = file.getPath();
+ LOG.debug("Removing {} from store", filePath);
+ try {
+ boolean success = store.getFileSystem().delete(filePath, false);
+ if (!success) {
+ failedDeletes.incrementAndGet();
+ LOG.warn("Attempted to delete:" + filePath
+ + ", but couldn't. Attempt to delete on next pass.");
+ }
+ else{
+ deletedFiles.incrementAndGet();
+ }
+ } catch (IOException e) {
+ e = e instanceof RemoteException ?
+ ((RemoteException)e).unwrapRemoteException() : e;
+ LOG.warn("Error while deleting: " + filePath, e);
+ }
+ }
+
+ private void logCleanupMetrics(long runtime, long deletedFiles, long failedDeletes) {
+ regionServer.getMetrics().updateBrokenStoreFileCleanerTimer(runtime);
+ regionServer.getMetrics().incrementBrokenStoreFileCleanerDeletes(deletedFiles);
+ regionServer.getMetrics().incrementBrokenStoreFileCleanerFailedDeletes(failedDeletes);
+ regionServer.getMetrics().incrementBrokenStoreFileCleanerRuns();
+
+ regionServer.reportBrokenStoreFileCleanerUsage(runtime, deletedFiles, failedDeletes, true);
+ }
+
+}
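
The chore above is disabled by default (DEFAULT_BROKEN_STOREFILE_CLEANER_ENABLED is false). A minimal sketch, using only the configuration keys defined in this class, of how it could be enabled and tuned; the concrete values are arbitrary examples, not recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.BrokenStoreFileCleaner;

public class BrokenStoreFileCleanerConfigSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // turn the chore on; it is off by default
    conf.setBoolean(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_ENABLED, true);
    // first run roughly 10 minutes after startup, then every hour
    conf.setInt(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY, 10 * 60 * 1000);
    conf.setInt(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_PERIOD, 60 * 60 * 1000);
    // only consider files that have not been modified for at least 12 hours
    conf.setLong(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_TTL, 12L * 60 * 60 * 1000);
    // randomize the initial delay by up to this fraction
    conf.setDouble(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY_JITTER, 0.25D);
    return conf;
  }
}
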
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CreateStoreFileWriterParams.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CreateStoreFileWriterParams.java
new file mode 100644
index 000000000000..10cd9f009e4a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CreateStoreFileWriterParams.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public final class CreateStoreFileWriterParams {
+
+ private long maxKeyCount;
+
+ private Compression.Algorithm compression;
+
+ private boolean isCompaction;
+
+ private boolean includeMVCCReadpoint;
+
+ private boolean includesTag;
+
+ private boolean shouldDropBehind;
+
+ private long totalCompactedFilesSize = -1;
+
+ private String fileStoragePolicy = HConstants.EMPTY_STRING;
+
+ private CreateStoreFileWriterParams() {
+ }
+
+ public long maxKeyCount() {
+ return maxKeyCount;
+ }
+
+ public CreateStoreFileWriterParams maxKeyCount(long maxKeyCount) {
+ this.maxKeyCount = maxKeyCount;
+ return this;
+ }
+
+ public Compression.Algorithm compression() {
+ return compression;
+ }
+
+ /**
+ * Set the compression algorithm to use
+ */
+ public CreateStoreFileWriterParams compression(Compression.Algorithm compression) {
+ this.compression = compression;
+ return this;
+ }
+
+ public boolean isCompaction() {
+ return isCompaction;
+ }
+
+ /**
+ * Whether we are creating a new file in a compaction
+ */
+ public CreateStoreFileWriterParams isCompaction(boolean isCompaction) {
+ this.isCompaction = isCompaction;
+ return this;
+ }
+
+ public boolean includeMVCCReadpoint() {
+ return includeMVCCReadpoint;
+ }
+
+ /**
+ * Whether to include the MVCC read point or not
+ */
+ public CreateStoreFileWriterParams includeMVCCReadpoint(boolean includeMVCCReadpoint) {
+ this.includeMVCCReadpoint = includeMVCCReadpoint;
+ return this;
+ }
+
+ public boolean includesTag() {
+ return includesTag;
+ }
+
+ /**
+ * Whether to include tags or not
+ */
+ public CreateStoreFileWriterParams includesTag(boolean includesTag) {
+ this.includesTag = includesTag;
+ return this;
+ }
+
+ public boolean shouldDropBehind() {
+ return shouldDropBehind;
+ }
+
+ public CreateStoreFileWriterParams shouldDropBehind(boolean shouldDropBehind) {
+ this.shouldDropBehind = shouldDropBehind;
+ return this;
+ }
+
+ public long totalCompactedFilesSize() {
+ return totalCompactedFilesSize;
+ }
+
+ public CreateStoreFileWriterParams totalCompactedFilesSize(long totalCompactedFilesSize) {
+ this.totalCompactedFilesSize = totalCompactedFilesSize;
+ return this;
+ }
+
+ public String fileStoragePolicy() {
+ return fileStoragePolicy;
+ }
+
+ public CreateStoreFileWriterParams fileStoragePolicy(String fileStoragePolicy) {
+ this.fileStoragePolicy = fileStoragePolicy;
+ return this;
+ }
+
+ public static CreateStoreFileWriterParams create() {
+ return new CreateStoreFileWriterParams();
+ }
+
+}
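
CreateStoreFileWriterParams is a plain fluent builder: every setter returns this, and the store and compactor code in this patch assembles one before asking the StoreEngine for a writer. A small usage sketch with placeholder values:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.CreateStoreFileWriterParams;

public class WriterParamsSketch {
  public static CreateStoreFileWriterParams example() {
    return CreateStoreFileWriterParams.create()
        .maxKeyCount(1_000L)                        // expected number of cells
        .compression(Compression.Algorithm.NONE)    // compression for this writer
        .isCompaction(false)                        // flush output, not compaction output
        .includeMVCCReadpoint(true)
        .includesTag(false)
        .shouldDropBehind(false)
        .fileStoragePolicy(HConstants.EMPTY_STRING);
  }
}
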
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
index 8201cb152c01..1e10eb2db231 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
@@ -71,7 +71,7 @@ public void append(Cell cell) throws IOException {
}
@Override
- protected Collection<StoreFileWriter> writers() {
+ public Collection<StoreFileWriter> writers() {
return lowerBoundary2Writer.values();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index 1df953d93c96..7422d9112eab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -19,18 +19,17 @@
import java.io.IOException;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactor;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* HBASE-15400 This store engine allows us to store data in date tiered layout with exponential
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
index 58f8bbbb6ac5..693b9c93b9fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
@@ -20,7 +20,6 @@
import java.io.IOException;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
@@ -39,8 +38,8 @@
* their derivatives.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class DefaultStoreEngine extends StoreEngine<
- DefaultStoreFlusher, RatioBasedCompactionPolicy, DefaultCompactor, DefaultStoreFileManager> {
+public class DefaultStoreEngine extends StoreEngine<DefaultStoreFlusher,
+  RatioBasedCompactionPolicy, DefaultCompactor, DefaultStoreFileManager> {
public static final String DEFAULT_STORE_FLUSHER_CLASS_KEY =
"hbase.hstore.defaultengine.storeflusher.class";
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
index a7d7fb1f3d56..306760d7ce6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
@@ -21,15 +21,14 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Default implementation of StoreFlusher.
@@ -60,9 +59,7 @@ public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
synchronized (flushLock) {
status.setStatus("Flushing " + store + ": creating writer");
// Write the map out to the disk
- writer = store.createWriterInTmp(cellsCount,
- store.getColumnFamilyDescriptor().getCompressionType(), false, true,
- snapshot.isTagsPresent(), false);
+ writer = createWriter(snapshot, false);
IOException e = null;
try {
performFlush(scanner, writer, throughputController);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index 7ce7f0310c7d..b00a50c522fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -28,7 +28,6 @@
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -158,7 +157,7 @@ protected KeyValueScanner createScanner(Scan scan, ScanInfo scanInfo,
protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
CellComparator cellComparator) throws IOException {
MobStoreEngine engine = new MobStoreEngine();
- engine.createComponents(conf, store, cellComparator);
+ engine.createComponentsOnce(conf, store, cellComparator);
return engine;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 667eabfcd281..8920471a86ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -24,7 +24,9 @@
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
@@ -49,6 +51,9 @@
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -145,7 +150,7 @@ public Path getRegionDir() {
// Temp Helpers
// ===========================================================================
/** @return {@link Path} to the region's temp directory, used for file creations */
- Path getTempDir() {
+ public Path getTempDir() {
return new Path(getRegionDir(), REGION_TEMP_DIR);
}
@@ -240,11 +245,7 @@ public String getStoragePolicyName(String familyName) {
* @param familyName Column Family Name
* @return a set of {@link StoreFileInfo} for the specified family.
*/
- public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
- return getStoreFiles(Bytes.toString(familyName));
- }
-
- public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
+ public List<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
return getStoreFiles(familyName, true);
}
@@ -254,7 +255,7 @@ public Collection getStoreFiles(final String familyName) throws I
* @param familyName Column Family Name
* @return a set of {@link StoreFileInfo} for the specified family.
*/
- public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
+ public List<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
throws IOException {
Path familyDir = getStoreDir(familyName);
FileStatus[] files = CommonFSUtils.listStatus(this.fs, familyDir);
@@ -597,21 +598,43 @@ void cleanupDaughterRegion(final RegionInfo regionInfo) throws IOException {
* to the proper location in the filesystem.
*
* @param regionInfo daughter {@link org.apache.hadoop.hbase.client.RegionInfo}
- * @throws IOException
*/
- public Path commitDaughterRegion(final RegionInfo regionInfo)
- throws IOException {
+ public Path commitDaughterRegion(final RegionInfo regionInfo, List<Path> allRegionFiles,
+ MasterProcedureEnv env) throws IOException {
Path regionDir = this.getSplitsDir(regionInfo);
if (fs.exists(regionDir)) {
// Write HRI to a file in case we need to recover hbase:meta
Path regionInfoFile = new Path(regionDir, REGION_INFO_FILE);
byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
+ HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
+ env.getMasterConfiguration(), fs, getTableDir(), regionInfo, false);
+ insertRegionFilesIntoStoreTracker(allRegionFiles, env, regionFs);
}
-
return regionDir;
}
+ private void insertRegionFilesIntoStoreTracker(List<Path> allFiles, MasterProcedureEnv env,
+ HRegionFileSystem regionFs) throws IOException {
+ TableDescriptor tblDesc = env.getMasterServices().getTableDescriptors().
+ get(regionInfo.getTable());
+ //we need to map trackers per store
+ Map<String, StoreFileTracker> trackerMap = new HashMap<>();
+ //we need to map store files per store
+ Map<String, List<StoreFileInfo>> fileInfoMap = new HashMap<>();
+ for (Path file : allFiles) {
+ String familyName = file.getParent().getName();
+ trackerMap.computeIfAbsent(familyName, t -> StoreFileTrackerFactory.create(conf, tblDesc,
+ tblDesc.getColumnFamily(Bytes.toBytes(familyName)), regionFs));
+ fileInfoMap.computeIfAbsent(familyName, l -> new ArrayList<>());
+ List<StoreFileInfo> infos = fileInfoMap.get(familyName);
+ infos.add(new StoreFileInfo(conf, fs, file, true));
+ }
+ for (Map.Entry<String, StoreFileTracker> entry : trackerMap.entrySet()) {
+ entry.getValue().add(fileInfoMap.get(entry.getKey()));
+ }
+ }
+
/**
* Creates region split daughter directories under the table dir. If the daughter regions already
* exist, for example, in the case of a recovery from a previous failed split procedure, this
@@ -648,7 +671,6 @@ public void createSplitsDir(RegionInfo daughterA, RegionInfo daughterB) throws I
* this method is invoked on the Master side, then the RegionSplitPolicy will
* NOT have a reference to a Region.
* @return Path to created reference.
- * @throws IOException
*/
public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte[] splitRow,
boolean top, RegionSplitPolicy splitPolicy) throws IOException {
@@ -799,13 +821,15 @@ public Path mergeStoreFile(RegionInfo mergingRegion, String familyName, HStoreFi
* Commit a merged region, making it ready for use.
* @throws IOException
*/
- public void commitMergedRegion() throws IOException {
+ public void commitMergedRegion(List<Path> allMergedFiles, MasterProcedureEnv env)
+ throws IOException {
Path regionDir = getMergesDir(regionInfoForFs);
if (regionDir != null && fs.exists(regionDir)) {
// Write HRI to a file in case we need to recover hbase:meta
Path regionInfoFile = new Path(regionDir, REGION_INFO_FILE);
byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
+ insertRegionFilesIntoStoreTracker(allMergedFiles, env, this);
}
}
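
Both commitDaughterRegion() and commitMergedRegion() now funnel the committed files through insertRegionFilesIntoStoreTracker(), which groups them by column family (the family name is the parent directory of each store file path) and registers each group with that family's tracker. A standalone sketch of just the grouping idiom, using only Hadoop's Path:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.Path;

public class GroupFilesByFamilySketch {
  // group store file paths by their column family, i.e. the parent directory name
  public static Map<String, List<Path>> byFamily(List<Path> allFiles) {
    Map<String, List<Path>> filesByFamily = new HashMap<>();
    for (Path file : allFiles) {
      filesByFamily.computeIfAbsent(file.getParent().getName(), k -> new ArrayList<>())
          .add(file);
    }
    return filesByFamily;
  }
}
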
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 4bf2d9c25f1d..d8957b1aa889 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -432,6 +432,8 @@ public class HRegionServer extends HBaseServerBase
*/
final ServerNonceManager nonceManager;
+ private BrokenStoreFileCleaner brokenStoreFileCleaner;
+
@InterfaceAudience.Private
CompactedHFilesDischarger compactedFileDischarger;
@@ -458,6 +460,7 @@ public class HRegionServer extends HBaseServerBase
// A timer to shutdown the process if abort takes too long
private Timer abortMonitor;
+ private RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest unsentBrokenSFCReport;
/**
* Starts a HRegionServer at the default location.
@@ -1831,6 +1834,9 @@ private void startServices() throws IOException {
if (this.slowLogTableOpsChore != null) {
choreService.scheduleChore(slowLogTableOpsChore);
}
+ if (this.brokenStoreFileCleaner != null) {
+ choreService.scheduleChore(brokenStoreFileCleaner);
+ }
// Leases is not a Thread. Internally it runs a daemon thread. If it gets
// an unhandled exception, it will just exit.
@@ -1910,6 +1916,22 @@ private void initializeThreads() {
this.storefileRefresher = new StorefileRefresherChore(storefileRefreshPeriod,
onlyMetaRefresh, this, this);
}
+
+ int brokenStoreFileCleanerPeriod = conf.getInt(
+ BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_PERIOD,
+ BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_PERIOD);
+ int brokenStoreFileCleanerDelay = conf.getInt(
+ BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY,
+ BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY);
+ double brokenStoreFileCleanerDelayJitter = conf.getDouble(
+ BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY_JITTER,
+ BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY_JITTER);
+ double jitterRate = (RandomUtils.nextDouble() - 0.5D) * brokenStoreFileCleanerDelayJitter;
+ long jitterValue = Math.round(brokenStoreFileCleanerDelay * jitterRate);
+ this.brokenStoreFileCleaner =
+ new BrokenStoreFileCleaner((int) (brokenStoreFileCleanerDelay + jitterValue),
+ brokenStoreFileCleanerPeriod, this, conf, this);
+
registerConfigurationObservers();
}
@@ -3484,6 +3506,85 @@ protected boolean clusterMode() {
return !conf.getBoolean(MASTERLESS_CONFIG_NAME, false);
}
+ @InterfaceAudience.Private
+ public BrokenStoreFileCleaner getBrokenStoreFileCleaner(){
+ return brokenStoreFileCleaner;
+ }
+
+ /**
+ * Reports the results of a BrokenStoreFileCleaner chore run. If reporting fails, the unsent
+ * report is stored and sent together with the next scheduled report.
+ *
+ * @param runtime chore runtime in milliseconds
+ * @param deletedFiles number of cleaned junk files
+ * @param failedDeletes number of files the chore tried and failed to delete
+ * @return whether sending the report was successful
+ */
+ public boolean reportBrokenStoreFileCleanerUsage(long runtime, long deletedFiles,
+ long failedDeletes, boolean retry) {
+ RegionServerStatusService.BlockingInterface rss = rssStub;
+ RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest request = null;
+ if (rss == null) {
+ // the current server could be stopping.
+ LOG.trace("Skipping BrokenStoreFileCleaner chore report to HMaster as stub is null");
+ return true;
+ }
+ try {
+ request =
+ buildBrokenSFCReport(runtime, deletedFiles, failedDeletes, unsentBrokenSFCReport);
+ rss.reportBrokenStoreFileCleanerUsage(null, request);
+ if (unsentBrokenSFCReport != null) {
+ unsentBrokenSFCReport = null;
+ }
+ } catch (ServiceException se) {
+ if (!retry) {
+ LOG.debug("Storing unsent BrokenStoreFileCleaner chore report");
+ unsentBrokenSFCReport = request;
+ }
+
+ IOException ioe = ProtobufUtil.getRemoteException(se);
+ if (ioe instanceof PleaseHoldException) {
+ LOG.trace("Failed to report BrokenStoreFileCleaner chore results to Master because"
+ + " it is initializing.", ioe);
+ // The Master is coming up. Avoid re-creating the stub.
+ return true;
+ }
+ LOG.debug("Failed to report BrokenStoreFileCleaner chore result to Master.", ioe);
+ if (retry) {
+ LOG.debug("Re-trying to send BrokenStoreFileCleaner chore report", ioe);
+ if (rssStub == rss) {
+ rssStub = null;
+ }
+ createRegionServerStatusStub(true);
+ return reportBrokenStoreFileCleanerUsage(runtime, deletedFiles, failedDeletes, false);
+ }
+ }
+ return true;
+ }
+
+ private RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest buildBrokenSFCReport(
+ long runtime, long deletedFiles, long failedDeletes,
+ RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest storedRequest) {
+ RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest.Builder builder;
+ if (storedRequest != null) {
+ builder =
+ RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest.newBuilder(storedRequest);
+ } else {
+ builder = RegionServerStatusProtos.BrokenStoreFileCleanerUsageRequest.newBuilder();
+ }
+ builder.setServerName(getServerName().getServerName());
+ if (deletedFiles > 0) {
+ builder.setDeletedFiles(builder.getDeletedFiles() + deletedFiles);
+ }
+ if(failedDeletes > 0) {
+ builder.setFailedDeletes(builder.getFailedDeletes() + failedDeletes);
+ }
+ builder.setRuntime(runtime);
+ builder.setRuns(builder.getRuns() + 1);
+ return builder.build();
+ }
+
@Override
protected void stopChores() {
shutdownChore(nonceManagerChore);
@@ -3494,5 +3595,6 @@ protected void stopChores() {
shutdownChore(storefileRefresher);
shutdownChore(fsUtilizationChore);
shutdownChore(slowLogTableOpsChore);
+ shutdownChore(brokenStoreFileCleaner);
}
}
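
In initializeThreads() above, the cleaner's first run is offset by a random jitter around the configured delay. A standalone sketch of that computation; it substitutes java.util.Random for the RandomUtils helper used in the patch, so it only mirrors the intent:

import java.util.Random;

public class CleanerDelayJitterSketch {
  // Returns the initial delay shifted by a random offset of at most +/- (jitterFraction / 2).
  public static long jitteredDelay(int delayMs, double jitterFraction) {
    double jitterRate = (new Random().nextDouble() - 0.5D) * jitterFraction;
    return delayMs + Math.round(delayMs * jitterRate);
  }

  public static void main(String[] args) {
    // with the defaults (2h delay, 0.25 jitter) the first run lands between ~1h45m and ~2h15m
    System.out.println(jitteredDelay(2 * 60 * 60 * 1000, 0.25D));
  }
}
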
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 0ee7b5777fc3..ba892d76c2e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import com.google.errorprone.annotations.RestrictedApi;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.InetSocketAddress;
@@ -47,8 +48,6 @@
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Predicate;
import java.util.function.ToLongFunction;
import java.util.stream.Collectors;
import java.util.stream.LongStream;
@@ -70,17 +69,12 @@
import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
import org.apache.hadoop.hbase.coprocessor.ReadOnlyConfiguration;
import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.quotas.RegionSizeStore;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -110,7 +104,6 @@
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;
@@ -165,16 +158,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
private boolean cacheOnWriteLogged;
- /**
- * RWLock for store operations.
- * Locked in shared mode when the list of component stores is looked at:
- * - all reads/writes to table data
- * - checking for split
- * Locked in exclusive mode when the list of component stores is modified:
- * - closing
- * - completing a compaction
- */
- final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
/**
* Lock specific to archiving compacted store files. This avoids races around
* the combination of retrieving the list of compacted files and moving them to
@@ -283,14 +266,8 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
}
this.storeEngine = createStoreEngine(this, this.conf, region.getCellComparator());
- List<HStoreFile> hStoreFiles = loadStoreFiles(warmup);
- // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read
- // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and
- // update the storeSize in the refreshStoreSizeAndTotalBytes() finally (just like compaction) , so
- // no need calculate the storeSize twice.
- this.storeSize.addAndGet(getStorefilesSize(hStoreFiles, sf -> true));
- this.totalUncompressedBytes.addAndGet(getTotalUncompressedBytes(hStoreFiles));
- this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles);
+ storeEngine.initialize(warmup);
+ refreshStoreSizeAndTotalBytes();
flushRetriesNumber = conf.getInt(
"hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
@@ -510,105 +487,18 @@ void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) {
this.dataBlockEncoder = blockEncoder;
}
- /**
- * Creates an unsorted list of StoreFile loaded in parallel
- * from the given directory.
- */
- private List<HStoreFile> loadStoreFiles(boolean warmup) throws IOException {
- Collection<StoreFileInfo> files = getRegionFileSystem().getStoreFiles(getColumnFamilyName());
- return openStoreFiles(files, warmup);
- }
-
- private List<HStoreFile> openStoreFiles(Collection<StoreFileInfo> files, boolean warmup)
- throws IOException {
- if (CollectionUtils.isEmpty(files)) {
- return Collections.emptyList();
- }
- // initialize the thread pool for opening store files in parallel..
- ThreadPoolExecutor storeFileOpenerThreadPool =
- this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpener-"
- + this.region.getRegionInfo().getEncodedName() + "-" + this.getColumnFamilyName());
- CompletionService<HStoreFile> completionService =
- new ExecutorCompletionService<>(storeFileOpenerThreadPool);
-
- int totalValidStoreFile = 0;
- for (StoreFileInfo storeFileInfo : files) {
- // The StoreFileInfo will carry store configuration down to HFile, we need to set it to
- // our store's CompoundConfiguration here.
- storeFileInfo.setConf(conf);
- // open each store file in parallel
- completionService.submit(() -> this.createStoreFileAndReader(storeFileInfo));
- totalValidStoreFile++;
- }
-
- Set<String> compactedStoreFiles = new HashSet<>();
- ArrayList<HStoreFile> results = new ArrayList<>(files.size());
- IOException ioe = null;
- try {
- for (int i = 0; i < totalValidStoreFile; i++) {
- try {
- HStoreFile storeFile = completionService.take().get();
- if (storeFile != null) {
- LOG.debug("loaded {}", storeFile);
- results.add(storeFile);
- compactedStoreFiles.addAll(storeFile.getCompactedStoreFiles());
- }
- } catch (InterruptedException e) {
- if (ioe == null) {
- ioe = new InterruptedIOException(e.getMessage());
- }
- } catch (ExecutionException e) {
- if (ioe == null) {
- ioe = new IOException(e.getCause());
- }
- }
- }
- } finally {
- storeFileOpenerThreadPool.shutdownNow();
- }
- if (ioe != null) {
- // close StoreFile readers
- boolean evictOnClose =
- getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true;
- for (HStoreFile file : results) {
- try {
- if (file != null) {
- file.closeStoreFile(evictOnClose);
- }
- } catch (IOException e) {
- LOG.warn("Could not close store file {}", file, e);
- }
- }
- throw ioe;
- }
-
- // Should not archive the compacted store files when region warmup. See HBASE-22163.
- if (!warmup) {
- // Remove the compacted files from result
- List<HStoreFile> filesToRemove = new ArrayList<>(compactedStoreFiles.size());
- for (HStoreFile storeFile : results) {
- if (compactedStoreFiles.contains(storeFile.getPath().getName())) {
- LOG.warn("Clearing the compacted storefile {} from {}", storeFile, this);
- storeFile.getReader().close(storeFile.getCacheConf() != null ?
- storeFile.getCacheConf().shouldEvictOnClose() : true);
- filesToRemove.add(storeFile);
- }
- }
- results.removeAll(filesToRemove);
- if (!filesToRemove.isEmpty() && this.isPrimaryReplicaStore()) {
- LOG.debug("Moving the files {} to archive", filesToRemove);
- getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(),
- filesToRemove);
- }
- }
-
- return results;
+ private void postRefreshStoreFiles() throws IOException {
+ // Advance the memstore read point to be at least the new store files seqIds so that
+ // readers might pick it up. This assumes that the store is not getting any writes (otherwise
+ // in-flight transactions might be made visible)
+ getMaxSequenceId().ifPresent(region.getMVCC()::advanceTo);
+ refreshStoreSizeAndTotalBytes();
}
@Override
public void refreshStoreFiles() throws IOException {
- Collection<StoreFileInfo> newFiles = getRegionFileSystem().getStoreFiles(getColumnFamilyName());
- refreshStoreFilesInternal(newFiles);
+ storeEngine.refreshStoreFiles();
+ postRefreshStoreFiles();
}
/**
@@ -616,89 +506,8 @@ public void refreshStoreFiles() throws IOException {
* region replicas to keep up to date with the primary region files.
*/
public void refreshStoreFiles(Collection<String> newFiles) throws IOException {
- List<StoreFileInfo> storeFiles = new ArrayList<>(newFiles.size());
- for (String file : newFiles) {
- storeFiles.add(getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file));
- }
- refreshStoreFilesInternal(storeFiles);
- }
-
- /**
- * Checks the underlying store files, and opens the files that have not
- * been opened, and removes the store file readers for store files no longer
- * available. Mainly used by secondary region replicas to keep up to date with
- * the primary region files.
- */
- private void refreshStoreFilesInternal(Collection<StoreFileInfo> newFiles) throws IOException {
- StoreFileManager sfm = storeEngine.getStoreFileManager();
- Collection<HStoreFile> currentFiles = sfm.getStorefiles();
- Collection<HStoreFile> compactedFiles = sfm.getCompactedfiles();
- if (currentFiles == null) {
- currentFiles = Collections.emptySet();
- }
- if (newFiles == null) {
- newFiles = Collections.emptySet();
- }
- if (compactedFiles == null) {
- compactedFiles = Collections.emptySet();
- }
-
- HashMap<StoreFileInfo, HStoreFile> currentFilesSet = new HashMap<>(currentFiles.size());
- for (HStoreFile sf : currentFiles) {
- currentFilesSet.put(sf.getFileInfo(), sf);
- }
- HashMap<StoreFileInfo, HStoreFile> compactedFilesSet = new HashMap<>(compactedFiles.size());
- for (HStoreFile sf : compactedFiles) {
- compactedFilesSet.put(sf.getFileInfo(), sf);
- }
-
- Set<StoreFileInfo> newFilesSet = new HashSet<>(newFiles);
- // Exclude the files that have already been compacted
- newFilesSet = Sets.difference(newFilesSet, compactedFilesSet.keySet());
- Set<StoreFileInfo> toBeAddedFiles = Sets.difference(newFilesSet, currentFilesSet.keySet());
- Set<StoreFileInfo> toBeRemovedFiles = Sets.difference(currentFilesSet.keySet(), newFilesSet);
-
- if (toBeAddedFiles.isEmpty() && toBeRemovedFiles.isEmpty()) {
- return;
- }
-
- LOG.info("Refreshing store files for " + this + " files to add: "
- + toBeAddedFiles + " files to remove: " + toBeRemovedFiles);
-
- Set<HStoreFile> toBeRemovedStoreFiles = new HashSet<>(toBeRemovedFiles.size());
- for (StoreFileInfo sfi : toBeRemovedFiles) {
- toBeRemovedStoreFiles.add(currentFilesSet.get(sfi));
- }
-
- // try to open the files
- List<HStoreFile> openedFiles = openStoreFiles(toBeAddedFiles, false);
-
- // propogate the file changes to the underlying store file manager
- replaceStoreFiles(toBeRemovedStoreFiles, openedFiles); //won't throw an exception
-
- // Advance the memstore read point to be at least the new store files seqIds so that
- // readers might pick it up. This assumes that the store is not getting any writes (otherwise
- // in-flight transactions might be made visible)
- if (!toBeAddedFiles.isEmpty()) {
- // we must have the max sequence id here as we do have several store files
- region.getMVCC().advanceTo(this.getMaxSequenceId().getAsLong());
- }
-
- refreshStoreSizeAndTotalBytes();
- }
-
- protected HStoreFile createStoreFileAndReader(final Path p) throws IOException {
- StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(),
- p, isPrimaryReplicaStore());
- return createStoreFileAndReader(info);
- }
-
- private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException {
- info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
- HStoreFile storeFile = new HStoreFile(info, getColumnFamilyDescriptor().getBloomFilterType(),
- getCacheConfig());
- storeFile.initReader();
- return storeFile;
+ storeEngine.refreshStoreFiles(newFiles);
+ postRefreshStoreFiles();
}
/**
@@ -721,7 +530,7 @@ public void stopReplayingFromWAL(){
* Adds a value to the memstore
*/
public void add(final Cell cell, MemStoreSizing memstoreSizing) {
- lock.readLock().lock();
+ storeEngine.readLock();
try {
if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) {
LOG.trace("tableName={}, encodedName={}, columnFamilyName={} is too busy!",
@@ -729,7 +538,7 @@ public void add(final Cell cell, MemStoreSizing memstoreSizing) {
}
this.memstore.add(cell, memstoreSizing);
} finally {
- lock.readLock().unlock();
+ storeEngine.readUnlock();
currentParallelPutCount.decrementAndGet();
}
}
@@ -738,7 +547,7 @@ public void add(final Cell cell, MemStoreSizing memstoreSizing) {
* Adds the specified value to the memstore
*/
public void add(final Iterable<Cell> cells, MemStoreSizing memstoreSizing) {
- lock.readLock().lock();
+ storeEngine.readLock();
try {
if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) {
LOG.trace("tableName={}, encodedName={}, columnFamilyName={} is too busy!",
@@ -746,7 +555,7 @@ public void add(final Iterable cells, MemStoreSizing memstoreSizing) {
}
memstore.add(cells, memstoreSizing);
} finally {
- lock.readLock().unlock();
+ storeEngine.readUnlock();
currentParallelPutCount.decrementAndGet();
}
}
@@ -869,17 +678,16 @@ public Path bulkLoadHFile(byte[] family, String srcPathStr, Path dstPath) throws
LOG.info("Loaded HFile " + srcPath + " into " + this + " as "
+ dstPath + " - updating store file list.");
- HStoreFile sf = createStoreFileAndReader(dstPath);
+ HStoreFile sf = storeEngine.createStoreFileAndReader(dstPath);
bulkLoadHFile(sf);
- LOG.info("Successfully loaded {} into {} (new location: {})",
- srcPath, this, dstPath);
+ LOG.info("Successfully loaded {} into {} (new location: {})", srcPath, this, dstPath);
return dstPath;
}
public void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException {
- HStoreFile sf = createStoreFileAndReader(fileInfo);
+ HStoreFile sf = storeEngine.createStoreFileAndReader(fileInfo);
bulkLoadHFile(sf);
}
@@ -887,28 +695,75 @@ private void bulkLoadHFile(HStoreFile sf) throws IOException {
StoreFileReader r = sf.getReader();
this.storeSize.addAndGet(r.length());
this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
-
- // Append the new storefile into the list
- this.lock.writeLock().lock();
- try {
- this.storeEngine.getStoreFileManager().insertNewFiles(Lists.newArrayList(sf));
- } finally {
- // We need the lock, as long as we are updating the storeFiles
- // or changing the memstore. Let us release it before calling
- // notifyChangeReadersObservers. See HBASE-4485 for a possible
- // deadlock scenario that could have happened if continue to hold
- // the lock.
- this.lock.writeLock().unlock();
- }
+ storeEngine.addStoreFiles(Lists.newArrayList(sf), () -> {
+ });
LOG.info("Loaded HFile " + sf.getFileInfo() + " into " + this);
if (LOG.isTraceEnabled()) {
- String traceMessage = "BULK LOAD time,size,store size,store files ["
- + EnvironmentEdgeManager.currentTime() + "," + r.length() + "," + storeSize
- + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
+ String traceMessage = "BULK LOAD time,size,store size,store files [" +
+ EnvironmentEdgeManager.currentTime() + "," + r.length() + "," + storeSize + "," +
+ storeEngine.getStoreFileManager().getStorefileCount() + "]";
LOG.trace(traceMessage);
}
}
+ private ImmutableCollection<HStoreFile> closeWithoutLock() throws IOException {
+ // Clear so metrics doesn't find them.
+ ImmutableCollection<HStoreFile> result = storeEngine.getStoreFileManager().clearFiles();
+ Collection<HStoreFile> compactedfiles = storeEngine.getStoreFileManager().clearCompactedFiles();
+ // clear the compacted files
+ if (CollectionUtils.isNotEmpty(compactedfiles)) {
+ removeCompactedfiles(compactedfiles,
+ getCacheConfig() != null ? getCacheConfig().shouldEvictOnClose() : true);
+ }
+ if (!result.isEmpty()) {
+ // initialize the thread pool for closing store files in parallel.
+ ThreadPoolExecutor storeFileCloserThreadPool =
+ this.region.getStoreFileOpenAndCloseThreadPool("StoreFileCloser-" +
+ this.region.getRegionInfo().getEncodedName() + "-" + this.getColumnFamilyName());
+
+ // close each store file in parallel
+ CompletionService completionService =
+ new ExecutorCompletionService<>(storeFileCloserThreadPool);
+ for (HStoreFile f : result) {
+ completionService.submit(new Callable() {
+ @Override
+ public Void call() throws IOException {
+ boolean evictOnClose =
+ getCacheConfig() != null ? getCacheConfig().shouldEvictOnClose() : true;
+ f.closeStoreFile(evictOnClose);
+ return null;
+ }
+ });
+ }
+
+ IOException ioe = null;
+ try {
+ for (int i = 0; i < result.size(); i++) {
+ try {
+ Future future = completionService.take();
+ future.get();
+ } catch (InterruptedException e) {
+ if (ioe == null) {
+ ioe = new InterruptedIOException();
+ ioe.initCause(e);
+ }
+ } catch (ExecutionException e) {
+ if (ioe == null) {
+ ioe = new IOException(e.getCause());
+ }
+ }
+ }
+ } finally {
+ storeFileCloserThreadPool.shutdownNow();
+ }
+ if (ioe != null) {
+ throw ioe;
+ }
+ }
+ LOG.trace("Closed {}", this);
+ return result;
+ }
+
/**
* Close all the readers We don't need to worry about subsequent requests because the Region holds
* a write lock that will prevent any more reads or writes.
@@ -916,67 +771,18 @@ private void bulkLoadHFile(HStoreFile sf) throws IOException {
* @throws IOException on failure
*/
public ImmutableCollection close() throws IOException {
+ // findbugs cannot recognize that storeEngine.writeLock is just a lock operation, so it will
+ // report UL_UNRELEASED_LOCK_EXCEPTION_PATH; here we have to use two nested try-finally blocks.
+ // Change later if findbugs becomes smarter in the future.
this.archiveLock.lock();
- this.lock.writeLock().lock();
try {
- // Clear so metrics doesn't find them.
- ImmutableCollection result = storeEngine.getStoreFileManager().clearFiles();
- Collection compactedfiles =
- storeEngine.getStoreFileManager().clearCompactedFiles();
- // clear the compacted files
- if (CollectionUtils.isNotEmpty(compactedfiles)) {
- removeCompactedfiles(compactedfiles, getCacheConfig() != null ?
- getCacheConfig().shouldEvictOnClose() : true);
- }
- if (!result.isEmpty()) {
- // initialize the thread pool for closing store files in parallel.
- ThreadPoolExecutor storeFileCloserThreadPool = this.region
- .getStoreFileOpenAndCloseThreadPool("StoreFileCloser-"
- + this.region.getRegionInfo().getEncodedName() + "-" + this.getColumnFamilyName());
-
- // close each store file in parallel
- CompletionService completionService =
- new ExecutorCompletionService<>(storeFileCloserThreadPool);
- for (HStoreFile f : result) {
- completionService.submit(new Callable() {
- @Override
- public Void call() throws IOException {
- boolean evictOnClose =
- getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true;
- f.closeStoreFile(evictOnClose);
- return null;
- }
- });
- }
-
- IOException ioe = null;
- try {
- for (int i = 0; i < result.size(); i++) {
- try {
- Future future = completionService.take();
- future.get();
- } catch (InterruptedException e) {
- if (ioe == null) {
- ioe = new InterruptedIOException();
- ioe.initCause(e);
- }
- } catch (ExecutionException e) {
- if (ioe == null) {
- ioe = new IOException(e.getCause());
- }
- }
- }
- } finally {
- storeFileCloserThreadPool.shutdownNow();
- }
- if (ioe != null) {
- throw ioe;
- }
+ this.storeEngine.writeLock();
+ try {
+ return closeWithoutLock();
+ } finally {
+ this.storeEngine.writeUnlock();
}
- LOG.trace("Closed {}", this);
- return result;
} finally {
- this.lock.writeLock().unlock();
this.archiveLock.unlock();
}
}
@@ -1006,7 +812,7 @@ protected List flushCache(final long logCacheFlushId, MemStoreSnapshot sna
try {
for (Path pathName : pathNames) {
lastPathName = pathName;
- validateStoreFile(pathName);
+ storeEngine.validateStoreFile(pathName);
}
return pathNames;
} catch (Exception e) {
@@ -1052,204 +858,37 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException {
}
Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path);
- HStoreFile sf = createStoreFileAndReader(dstPath);
+ HStoreFile sf = storeEngine.createStoreFileAndReader(dstPath);
StoreFileReader r = sf.getReader();
this.storeSize.addAndGet(r.length());
this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
- this.lock.writeLock().lock();
- try {
- this.storeEngine.getStoreFileManager().insertNewFiles(Lists.newArrayList(sf));
- } finally {
- this.lock.writeLock().unlock();
- }
+ storeEngine.addStoreFiles(Lists.newArrayList(sf), () -> {
+ });
LOG.info("Loaded recovered hfile to {}, entries={}, sequenceid={}, filesize={}", sf,
r.getEntries(), r.getSequenceID(), TraditionalBinaryPrefix.long2String(r.length(), "B", 1));
return sf;
}
- /**
- * Commit the given {@code files}.
- *
- * We will move the file into data directory, and open it.
- * @param files the files want to commit
- * @param validate whether to validate the store files
- * @return the committed store files
- */
- private List commitStoreFiles(List files, boolean validate) throws IOException {
- List committedFiles = new ArrayList<>(files.size());
- HRegionFileSystem hfs = getRegionFileSystem();
- String familyName = getColumnFamilyName();
- for (Path file : files) {
- try {
- if (validate) {
- validateStoreFile(file);
- }
- Path committedPath = hfs.commitStoreFile(familyName, file);
- HStoreFile sf = createStoreFileAndReader(committedPath);
- committedFiles.add(sf);
- } catch (IOException e) {
- LOG.error("Failed to commit store file {}", file, e);
- // Try to delete the files we have committed before.
- // It is OK to fail when deleting as leaving the file there does not cause any data
- // corruption problem. It just introduces some duplicated data which may impact read
- // performance a little when reading before compaction.
- for (HStoreFile sf : committedFiles) {
- Path pathToDelete = sf.getPath();
- try {
- sf.deleteStoreFile();
- } catch (IOException deleteEx) {
- LOG.warn(HBaseMarkers.FATAL, "Failed to delete committed store file {}", pathToDelete,
- deleteEx);
- }
- }
- throw new IOException("Failed to commit the flush", e);
- }
- }
- return committedFiles;
- }
-
- public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
- boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag,
- boolean shouldDropBehind) throws IOException {
- return createWriterInTmp(maxKeyCount, compression, isCompaction, includeMVCCReadpoint,
- includesTag, shouldDropBehind, -1, HConstants.EMPTY_STRING);
- }
-
- /**
- * @param compression Compression algorithm to use
- * @param isCompaction whether we are creating a new file in a compaction
- * @param includeMVCCReadpoint - whether to include MVCC or not
- * @param includesTag - includesTag or not
- * @return Writer for a new StoreFile in the tmp dir.
- */
- // TODO : allow the Writer factory to create Writers of ShipperListener type only in case of
- // compaction
- public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
- boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag,
- boolean shouldDropBehind, long totalCompactedFilesSize, String fileStoragePolicy)
- throws IOException {
- // creating new cache config for each new writer
- final CacheConfig cacheConf = getCacheConfig();
- final CacheConfig writerCacheConf = new CacheConfig(cacheConf);
- if (isCompaction) {
- // Don't cache data on write on compactions, unless specifically configured to do so
- // Cache only when total file size remains lower than configured threshold
- final boolean cacheCompactedBlocksOnWrite =
- getCacheConfig().shouldCacheCompactedBlocksOnWrite();
- // if data blocks are to be cached on write
- // during compaction, we should forcefully
- // cache index and bloom blocks as well
- if (cacheCompactedBlocksOnWrite && totalCompactedFilesSize <= cacheConf
- .getCacheCompactedBlocksOnWriteThreshold()) {
- writerCacheConf.enableCacheOnWrite();
- if (!cacheOnWriteLogged) {
- LOG.info("For {} , cacheCompactedBlocksOnWrite is true, hence enabled " +
- "cacheOnWrite for Data blocks, Index blocks and Bloom filter blocks", this);
- cacheOnWriteLogged = true;
- }
- } else {
- writerCacheConf.setCacheDataOnWrite(false);
- if (totalCompactedFilesSize > cacheConf.getCacheCompactedBlocksOnWriteThreshold()) {
- // checking condition once again for logging
- LOG.debug(
- "For {}, setting cacheCompactedBlocksOnWrite as false as total size of compacted "
- + "files - {}, is greater than cacheCompactedBlocksOnWriteThreshold - {}",
- this, totalCompactedFilesSize,
- cacheConf.getCacheCompactedBlocksOnWriteThreshold());
- }
- }
- } else {
- final boolean shouldCacheDataOnWrite = cacheConf.shouldCacheDataOnWrite();
- if (shouldCacheDataOnWrite) {
- writerCacheConf.enableCacheOnWrite();
- if (!cacheOnWriteLogged) {
- LOG.info("For {} , cacheDataOnWrite is true, hence enabled cacheOnWrite for " +
- "Index blocks and Bloom filter blocks", this);
- cacheOnWriteLogged = true;
- }
- }
- }
- Encryption.Context encryptionContext = storeContext.getEncryptionContext();
- HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag,
- encryptionContext);
- Path familyTempDir = new Path(getRegionFileSystem().getTempDir(), getColumnFamilyName());
- StoreFileWriter.Builder builder =
- new StoreFileWriter.Builder(conf, writerCacheConf, getFileSystem())
- .withOutputDir(familyTempDir)
- .withBloomType(storeContext.getBloomFilterType())
- .withMaxKeyCount(maxKeyCount)
- .withFavoredNodes(storeContext.getFavoredNodes())
- .withFileContext(hFileContext)
- .withShouldDropCacheBehind(shouldDropBehind)
- .withCompactedFilesSupplier(storeContext.getCompactedFilesSupplier())
- .withFileStoragePolicy(fileStoragePolicy);
- return builder.build();
- }
-
- private HFileContext createFileContext(Compression.Algorithm compression,
- boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) {
- if (compression == null) {
- compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
- }
- ColumnFamilyDescriptor family = getColumnFamilyDescriptor();
- HFileContext hFileContext = new HFileContextBuilder()
- .withIncludesMvcc(includeMVCCReadpoint)
- .withIncludesTags(includesTag)
- .withCompression(compression)
- .withCompressTags(family.isCompressTags())
- .withChecksumType(StoreUtils.getChecksumType(conf))
- .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
- .withBlockSize(family.getBlocksize())
- .withHBaseCheckSum(true)
- .withDataBlockEncoding(family.getDataBlockEncoding())
- .withEncryptionContext(encryptionContext)
- .withCreateTime(EnvironmentEdgeManager.currentTime())
- .withColumnFamily(getColumnFamilyDescriptor().getName())
- .withTableName(getTableName().getName())
- .withCellComparator(getComparator())
- .build();
- return hFileContext;
- }
-
private long getTotalSize(Collection sfs) {
return sfs.stream().mapToLong(sf -> sf.getReader().length()).sum();
}
- /**
- * Change storeFiles adding into place the Reader produced by this new flush.
- * @param sfs Store files
- * @return Whether compaction is required.
- */
- private boolean updateStorefiles(List sfs, long snapshotId) throws IOException {
- this.lock.writeLock().lock();
- try {
- this.storeEngine.getStoreFileManager().insertNewFiles(sfs);
- /**
- * NOTE:we should keep clearSnapshot method inside the write lock because clearSnapshot may
- * close {@link DefaultMemStore#snapshot}, which may be used by
- * {@link DefaultMemStore#getScanners}.
- */
- if (snapshotId > 0) {
- this.memstore.clearSnapshot(snapshotId);
- }
- } finally {
- // We need the lock, as long as we are updating the storeFiles
- // or changing the memstore. Let us release it before calling
- // notifyChangeReadersObservers. See HBASE-4485 for a possible
- // deadlock scenario that could have happened if continue to hold
- // the lock.
- this.lock.writeLock().unlock();
- }
-
+ private boolean completeFlush(List sfs, long snapshotId) throws IOException {
+ // NOTE: we should keep the clearSnapshot method inside the write lock because clearSnapshot may
+ // close {@link DefaultMemStore#snapshot}, which may be used by
+ // {@link DefaultMemStore#getScanners}.
+ storeEngine.addStoreFiles(sfs,
+ snapshotId > 0 ? () -> this.memstore.clearSnapshot(snapshotId) : () -> {
+ });
// notify to be called here - only in case of flushes
notifyChangedReadersObservers(sfs);
if (LOG.isTraceEnabled()) {
long totalSize = getTotalSize(sfs);
- String traceMessage = "FLUSH time,count,size,store size,store files ["
- + EnvironmentEdgeManager.currentTime() + "," + sfs.size() + "," + totalSize
- + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
+ String traceMessage = "FLUSH time,count,size,store size,store files [" +
+ EnvironmentEdgeManager.currentTime() + "," + sfs.size() + "," + totalSize + "," +
+ storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
LOG.trace(traceMessage);
}
return needsCompaction();
@@ -1261,11 +900,11 @@ private boolean updateStorefiles(List sfs, long snapshotId) throws I
private void notifyChangedReadersObservers(List sfs) throws IOException {
for (ChangedReadersObserver o : this.changedReaderObservers) {
List memStoreScanners;
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
try {
memStoreScanners = this.memstore.getScanners(o.getReadPoint());
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
o.updateReaders(sfs, memStoreScanners);
}
@@ -1307,13 +946,13 @@ public List getScanners(boolean cacheBlocks, boolean usePread,
byte[] stopRow, boolean includeStopRow, long readPt) throws IOException {
Collection storeFilesToScan;
List memStoreScanners;
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
try {
storeFilesToScan = this.storeEngine.getStoreFileManager().getFilesForScan(startRow,
includeStartRow, stopRow, includeStopRow);
memStoreScanners = this.memstore.getScanners(readPt);
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
try {
@@ -1390,11 +1029,11 @@ public List getScanners(List files, boolean cacheBl
boolean includeMemstoreScanner) throws IOException {
List memStoreScanners = null;
if (includeMemstoreScanner) {
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
try {
memStoreScanners = this.memstore.getScanners(readPt);
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
}
try {
@@ -1510,14 +1149,19 @@ protected List doCompaction(CompactionRequestImpl cr,
List newFiles) throws IOException {
// Do the steps necessary to complete the compaction.
setStoragePolicyFromFileName(newFiles);
- List sfs = commitStoreFiles(newFiles, true);
+ List sfs = storeEngine.commitStoreFiles(newFiles, true);
if (this.getCoprocessorHost() != null) {
for (HStoreFile sf : sfs) {
getCoprocessorHost().postCompact(this, sf, cr.getTracker(), cr, user);
}
}
- writeCompactionWalRecord(filesToCompact, sfs);
- replaceStoreFiles(filesToCompact, sfs);
+ replaceStoreFiles(filesToCompact, sfs, true);
+
+ // This step is necessary for the correctness of BrokenStoreFileCleanerChore. It lets the
+ // CleanerChore know that the compaction is done and the file can be cleaned up if the
+ // compaction has failed.
+ storeEngine.resetCompactionWriter();
+
if (cr.isMajor()) {
majorCompactedCellsCount.addAndGet(getCompactionProgress().getTotalCompactingKVs());
majorCompactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize);
@@ -1581,25 +1225,24 @@ private void writeCompactionWalRecord(Collection filesCompacted,
this.region.getRegionInfo(), compactionDescriptor, this.region.getMVCC());
}
- void replaceStoreFiles(Collection compactedFiles, Collection result)
- throws IOException {
- this.lock.writeLock().lock();
- try {
- this.storeEngine.getStoreFileManager().addCompactionResults(compactedFiles, result);
- synchronized (filesCompacting) {
- filesCompacting.removeAll(compactedFiles);
- }
-
- // These may be null when the RS is shutting down. The space quota Chores will fix the Region
- // sizes later so it's not super-critical if we miss these.
- RegionServerServices rsServices = region.getRegionServerServices();
- if (rsServices != null && rsServices.getRegionServerSpaceQuotaManager() != null) {
- updateSpaceQuotaAfterFileReplacement(
- rsServices.getRegionServerSpaceQuotaManager().getRegionSizeStore(), getRegionInfo(),
- compactedFiles, result);
- }
- } finally {
- this.lock.writeLock().unlock();
+ @RestrictedApi(explanation = "Should only be called in TestHStore", link = "",
+ allowedOnPath = ".*/(HStore|TestHStore).java")
+ void replaceStoreFiles(Collection compactedFiles, Collection result,
+ boolean writeCompactionMarker) throws IOException {
+ storeEngine.replaceStoreFiles(compactedFiles, result);
+ if (writeCompactionMarker) {
+ writeCompactionWalRecord(compactedFiles, result);
+ }
+ synchronized (filesCompacting) {
+ filesCompacting.removeAll(compactedFiles);
+ }
+ // These may be null when the RS is shutting down. The space quota Chores will fix the Region
+ // sizes later so it's not super-critical if we miss these.
+ RegionServerServices rsServices = region.getRegionServerServices();
+ if (rsServices != null && rsServices.getRegionServerSpaceQuotaManager() != null) {
+ updateSpaceQuotaAfterFileReplacement(
+ rsServices.getRegionServerSpaceQuotaManager().getRegionSizeStore(), getRegionInfo(),
+ compactedFiles, result);
}
}
@@ -1722,7 +1365,7 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick
for (String compactionOutput : compactionOutputs) {
StoreFileInfo storeFileInfo =
getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput);
- HStoreFile storeFile = createStoreFileAndReader(storeFileInfo);
+ HStoreFile storeFile = storeEngine.createStoreFileAndReader(storeFileInfo);
outputStoreFiles.add(storeFile);
}
}
@@ -1730,7 +1373,7 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick
if (!inputStoreFiles.isEmpty() || !outputStoreFiles.isEmpty()) {
LOG.info("Replaying compaction marker, replacing input files: " +
inputStoreFiles + " with output files : " + outputStoreFiles);
- this.replaceStoreFiles(inputStoreFiles, outputStoreFiles);
+ this.replaceStoreFiles(inputStoreFiles, outputStoreFiles, false);
this.refreshStoreSizeAndTotalBytes();
}
}
@@ -1739,14 +1382,14 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick
public boolean hasReferences() {
// Grab the read lock here, because we need to ensure that: only when the atomic
// replaceStoreFiles(..) finished, we can get all the complete store file list.
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
try {
// Merge the current store files with compacted files here due to HBASE-20940.
Collection allStoreFiles = new ArrayList<>(getStorefiles());
allStoreFiles.addAll(getCompactedFiles());
return StoreUtils.hasReferences(allStoreFiles);
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
}
@@ -1786,7 +1429,7 @@ public Optional requestCompaction(int priority,
final CompactionContext compaction = storeEngine.createCompaction();
CompactionRequestImpl request = null;
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
try {
synchronized (filesCompacting) {
// First, see if coprocessor would want to override selection.
@@ -1859,7 +1502,7 @@ public Optional requestCompaction(int priority,
request.setTracker(tracker);
}
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
if (LOG.isDebugEnabled()) {
@@ -1892,7 +1535,7 @@ private void removeUnneededFiles() throws IOException {
this, getColumnFamilyDescriptor().getMinVersions());
return;
}
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
Collection delSfs = null;
try {
synchronized (filesCompacting) {
@@ -1904,7 +1547,7 @@ private void removeUnneededFiles() throws IOException {
}
}
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
if (CollectionUtils.isEmpty(delSfs)) {
@@ -1912,8 +1555,7 @@ private void removeUnneededFiles() throws IOException {
}
Collection newFiles = Collections.emptyList(); // No new files.
- writeCompactionWalRecord(delSfs, newFiles);
- replaceStoreFiles(delSfs, newFiles);
+ replaceStoreFiles(delSfs, newFiles, true);
refreshStoreSizeAndTotalBytes();
LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
+ this + "; total size is "
@@ -1935,25 +1577,6 @@ protected void finishCompactionRequest(CompactionRequestImpl cr) {
}
}
- /**
- * Validates a store file by opening and closing it. In HFileV2 this should not be an expensive
- * operation.
- * @param path the path to the store file
- */
- private void validateStoreFile(Path path) throws IOException {
- HStoreFile storeFile = null;
- try {
- storeFile = createStoreFileAndReader(path);
- } catch (IOException e) {
- LOG.error("Failed to open store file : {}, keeping it in tmp location", path, e);
- throw e;
- } finally {
- if (storeFile != null) {
- storeFile.closeStoreFile(false);
- }
- }
- }
-
/**
* Update counts.
*/
@@ -1999,7 +1622,7 @@ public boolean canSplit() {
* Determines if Store should be split.
*/
public Optional getSplitPoint() {
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
try {
// Should already be enforced by the split policy!
assert !this.getRegionInfo().isMetaRegion();
@@ -2012,7 +1635,7 @@ public Optional getSplitPoint() {
} catch(IOException e) {
LOG.warn("Failed getting store size for {}", this, e);
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
return Optional.empty();
}
@@ -2045,7 +1668,7 @@ public void triggerMajorCompaction() {
*/
public KeyValueScanner getScanner(Scan scan, final NavigableSet targetCols, long readPt)
throws IOException {
- lock.readLock().lock();
+ storeEngine.readLock();
try {
ScanInfo scanInfo;
if (this.getCoprocessorHost() != null) {
@@ -2055,7 +1678,7 @@ public KeyValueScanner getScanner(Scan scan, final NavigableSet targetCo
}
return createScanner(scan, scanInfo, targetCols, readPt);
} finally {
- lock.readLock().unlock();
+ storeEngine.readUnlock();
}
}
@@ -2085,7 +1708,7 @@ public List recreateScanners(List currentFileS
boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
byte[] startRow, boolean includeStartRow, byte[] stopRow, boolean includeStopRow, long readPt,
boolean includeMemstoreScanner) throws IOException {
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
try {
Map name2File =
new HashMap<>(getStorefilesCount() + getCompactedFilesCount());
@@ -2110,7 +1733,7 @@ public List recreateScanners(List currentFileS
return getScanners(filesToReopen, cacheBlocks, false, false, matcher, startRow,
includeStartRow, stopRow, includeStopRow, readPt, false);
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
}
@@ -2176,41 +1799,20 @@ public long getStoreSizeUncompressed() {
@Override
public long getStorefilesSize() {
// Include all StoreFiles
- return getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(), sf -> true);
+ return StoreUtils.getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(),
+ sf -> true);
}
@Override
public long getHFilesSize() {
// Include only StoreFiles which are HFiles
- return getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(),
+ return StoreUtils.getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(),
HStoreFile::isHFile);
}
- private long getTotalUncompressedBytes(List files) {
- return files.stream()
- .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::getTotalUncompressedBytes))
- .sum();
- }
-
- private long getStorefilesSize(Collection files, Predicate predicate) {
- return files.stream().filter(predicate)
- .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::length)).sum();
- }
-
- private long getStorefileFieldSize(HStoreFile file, ToLongFunction f) {
- if (file == null) {
- return 0L;
- }
- StoreFileReader reader = file.getReader();
- if (reader == null) {
- return 0L;
- }
- return f.applyAsLong(reader);
- }
-
private long getStorefilesFieldSize(ToLongFunction f) {
return this.storeEngine.getStoreFileManager().getStorefiles().stream()
- .mapToLong(file -> getStorefileFieldSize(file, f)).sum();
+ .mapToLong(file -> StoreUtils.getStorefileFieldSize(file, f)).sum();
}
@Override
@@ -2281,11 +1883,11 @@ public long getSmallestReadPoint() {
*/
public void upsert(Iterable cells, long readpoint, MemStoreSizing memstoreSizing)
throws IOException {
- this.lock.readLock().lock();
+ this.storeEngine.readLock();
try {
this.memstore.upsert(cells, readpoint, memstoreSizing);
} finally {
- this.lock.readLock().unlock();
+ this.storeEngine.readUnlock();
}
}
@@ -2338,7 +1940,7 @@ public boolean commit(MonitoredTask status) throws IOException {
return false;
}
status.setStatus("Flushing " + this + ": reopening flushed file");
- List storeFiles = commitStoreFiles(tempFiles, false);
+ List storeFiles = storeEngine.commitStoreFiles(tempFiles, false);
for (HStoreFile sf : storeFiles) {
StoreFileReader r = sf.getReader();
if (LOG.isInfoEnabled()) {
@@ -2361,7 +1963,7 @@ public boolean commit(MonitoredTask status) throws IOException {
}
}
// Add new file to store files. Clear snapshot too while we have the Store write lock.
- return updateStorefiles(storeFiles, snapshot.getId());
+ return completeFlush(storeFiles, snapshot.getId());
}
@Override
@@ -2389,7 +1991,7 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot)
// open the file as a store file (hfile link, etc)
StoreFileInfo storeFileInfo =
getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file);
- HStoreFile storeFile = createStoreFileAndReader(storeFileInfo);
+ HStoreFile storeFile = storeEngine.createStoreFileAndReader(storeFileInfo);
storeFiles.add(storeFile);
HStore.this.storeSize.addAndGet(storeFile.getReader().length());
HStore.this.totalUncompressedBytes
@@ -2406,7 +2008,7 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot)
snapshotId = snapshot.getId();
snapshot.close();
}
- HStore.this.updateStorefiles(storeFiles, snapshotId);
+ HStore.this.completeFlush(storeFiles, snapshotId);
}
/**
@@ -2419,7 +2021,7 @@ public void abort() throws IOException {
//won't be closed. If we are using MSLAB, the chunk referenced by those scanners
//can't be released, thus memory leak
snapshot.close();
- HStore.this.updateStorefiles(Collections.emptyList(), snapshot.getId());
+ HStore.this.completeFlush(Collections.emptyList(), snapshot.getId());
}
}
}
@@ -2582,7 +2184,7 @@ public synchronized void closeAndArchiveCompactedFiles() throws IOException {
// ensure other threads do not attempt to archive the same files on close()
archiveLock.lock();
try {
- lock.readLock().lock();
+ storeEngine.readLock();
Collection copyCompactedfiles = null;
try {
Collection compactedfiles =
@@ -2594,7 +2196,7 @@ public synchronized void closeAndArchiveCompactedFiles() throws IOException {
LOG.trace("No compacted files to archive");
}
} finally {
- lock.readLock().unlock();
+ storeEngine.readUnlock();
}
if (CollectionUtils.isNotEmpty(copyCompactedfiles)) {
removeCompactedfiles(copyCompactedfiles, true);
@@ -2729,12 +2331,7 @@ public boolean isSloppyMemStore() {
private void clearCompactedfiles(List filesToRemove) throws IOException {
LOG.trace("Clearing the compacted file {} from this store", filesToRemove);
- try {
- lock.writeLock().lock();
- this.getStoreEngine().getStoreFileManager().removeCompactedFiles(filesToRemove);
- } finally {
- lock.writeLock().unlock();
- }
+ storeEngine.removeCompactedFiles(filesToRemove);
}
void reportArchivedFilesForQuota(List<? extends StoreFile> archivedFiles, List<Long> fileSizes) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
index 07a90f6db950..c1a277ca56c8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
@@ -54,6 +54,7 @@ public class MetricsRegionServer {
private final MetricsTable metricsTable;
private MetricsRegionServerQuotaSource quotaSource;
private final MetricsUserAggregate userAggregate;
+ private final MetricsBrokenStoreFileCleaner brokenSFC;
private MetricRegistry metricRegistry;
private Timer bulkLoadTimer;
@@ -69,7 +70,8 @@ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Confi
this(regionServerWrapper,
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
.createServer(regionServerWrapper), createTableMetrics(conf), metricsTable,
- MetricsUserAggregateFactory.getMetricsUserAggregate(conf));
+ MetricsUserAggregateFactory.getMetricsUserAggregate(conf),
+ CompatibilitySingletonFactory.getInstance(MetricsBrokenStoreFileCleaner.class));
// Create hbase-metrics module based metrics. The registry should already be registered by the
// MetricsRegionServerSource
@@ -88,13 +90,15 @@ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Confi
}
MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
- MetricsRegionServerSource serverSource, RegionServerTableMetrics tableMetrics,
- MetricsTable metricsTable, MetricsUserAggregate userAggregate) {
+ MetricsRegionServerSource serverSource, RegionServerTableMetrics tableMetrics,
+ MetricsTable metricsTable, MetricsUserAggregate userAggregate,
+ MetricsBrokenStoreFileCleaner brokenSFC) {
this.regionServerWrapper = regionServerWrapper;
this.serverSource = serverSource;
this.tableMetrics = tableMetrics;
this.metricsTable = metricsTable;
this.userAggregate = userAggregate;
+ this.brokenSFC = brokenSFC;
}
/**
@@ -317,4 +321,20 @@ public void updateWriteQueryMeter(TableName tn) {
serverWriteQueryMeter.mark();
}
}
+
+ public void incrementBrokenStoreFileCleanerDeletes(long deletes) {
+ brokenSFC.incrementBrokenStoreFileCleanerDeletes(deletes);
+ }
+
+ public void incrementBrokenStoreFileCleanerFailedDeletes(long failedDeletes) {
+ brokenSFC.incrementBrokenStoreFileCleanerFailedDeletes(failedDeletes);
+ }
+
+ public void incrementBrokenStoreFileCleanerRuns() {
+ brokenSFC.incrementBrokenStoreFileCleanerRuns();
+ }
+
+ public void updateBrokenStoreFileCleanerTimer(long milis) {
+ brokenSFC.updateBrokenStoreFileCleanerTimer(milis);
+ }
}
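As a reading aid only, here is a minimal sketch (not part of this patch) of how a cleaner chore could report a run through the MetricsRegionServer hooks added above; the surrounding class, its fields and the counter values are assumptions, only the four metrics methods come from this diff.

import org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

// Hypothetical reporter, for illustration only.
class BrokenStoreFileCleanerMetricsSketch {
  private final MetricsRegionServer metrics;

  BrokenStoreFileCleanerMetricsSketch(MetricsRegionServer metrics) {
    this.metrics = metrics;
  }

  // deletedFiles and failedDeletes would be accumulated by the real chore run.
  void reportRun(long startMillis, long deletedFiles, long failedDeletes) {
    metrics.incrementBrokenStoreFileCleanerRuns();
    metrics.incrementBrokenStoreFileCleanerDeletes(deletedFiles);
    metrics.incrementBrokenStoreFileCleanerFailedDeletes(failedDeletes);
    metrics.updateBrokenStoreFileCleanerTimer(EnvironmentEdgeManager.currentTime() - startMillis);
  }
}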
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java
index 26233505db73..588f8f4027a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java
@@ -22,7 +22,9 @@
import java.util.function.Supplier;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -108,6 +110,18 @@ public RegionCoprocessorHost getCoprocessorHost() {
return coprocessorHost;
}
+ public TableName getTableName() {
+ return getRegionInfo().getTable();
+ }
+
+ public RegionInfo getRegionInfo() {
+ return regionFileSystem.getRegionInfo();
+ }
+
+ public boolean isPrimaryReplicaStore() {
+ return getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID;
+ }
+
public static Builder getBuilder() {
return new Builder();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
index 60b3c3d0d20f..ddb52d10ffd5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
@@ -19,38 +19,133 @@
package org.apache.hadoop.hbase.regionserver;
+import com.google.errorprone.annotations.RestrictedApi;
import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
-
+import java.util.Set;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Function;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
+import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
/**
- * StoreEngine is a factory that can create the objects necessary for HStore to operate.
- * Since not all compaction policies, compactors and store file managers are compatible,
- * they are tied together and replaced together via StoreEngine-s.
+ * StoreEngine is a factory that can create the objects necessary for HStore to operate. Since not
+ * all compaction policies, compactors and store file managers are compatible, they are tied
+ * together and replaced together via StoreEngine-s.
+ *
+ * We expose read/write lock methods to the upper layer for store operations:
+ *
+ * - Locked in shared mode when the list of component stores is looked at:
+ *   - all reads/writes to table data
+ *   - checking for split
+ *
+ * - Locked in exclusive mode when the list of component stores is modified:
+ *   - closing
+ *   - completing a compaction
+ *
+ * It is a bit confusing that we have a StoreFileManager (SFM) and then a StoreFileTracker (SFT).
+ * As its name says, SFT is used to track the store file list. The reason why we have a SFT
+ * alongside SFM is that, when introducing stripe compaction, we introduced the StoreEngine and
+ * also the SFM, but the SFM there is not a general 'Manager': it is only designed to manage the
+ * in-memory 'stripes', so we can select different store files when scanning or compacting. The
+ * 'tracking' of store files was actually done in
+ * {@link org.apache.hadoop.hbase.regionserver.HRegionFileSystem} and {@link HStore} before we had
+ * SFT. Since SFM is designed to hold only in-memory state, we hold the write lock when updating
+ * it, and that lock is also used to protect normal read/write requests, so we'd better not add IO
+ * operations to SFM. Also, no matter what the in-memory state is, stripe or not, it does not
+ * affect how we track the store files. Considering all these facts, we introduce a separate SFT
+ * to track the store files.
+ *
+ * Since we almost always need to update SFM and SFT at the same time, we introduce methods on
+ * StoreEngine directly to update them both, so the upper layer just needs to update the
+ * StoreEngine once, reducing the chance of misuse.
*/
@InterfaceAudience.Private
-public abstract class StoreEngine {
+public abstract class StoreEngine {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StoreEngine.class);
+
protected SF storeFlusher;
protected CP compactionPolicy;
protected C compactor;
protected SFM storeFileManager;
+ private Configuration conf;
+ private StoreContext ctx;
+ private RegionCoprocessorHost coprocessorHost;
+ private Function openStoreFileThreadPoolCreator;
+ private StoreFileTracker storeFileTracker;
+
+ private final ReadWriteLock storeLock = new ReentrantReadWriteLock();
/**
- * The name of the configuration parameter that specifies the class of
- * a store engine that is used to manage and compact HBase store files.
+ * The name of the configuration parameter that specifies the class of a store engine that is used
+ * to manage and compact HBase store files.
*/
public static final String STORE_ENGINE_CLASS_KEY = "hbase.hstore.engine.class";
- private static final Class<? extends StoreEngine<?, ?, ?, ?>>
- DEFAULT_STORE_ENGINE_CLASS = DefaultStoreEngine.class;
+ private static final Class<? extends StoreEngine<?, ?, ?, ?>> DEFAULT_STORE_ENGINE_CLASS =
+ DefaultStoreEngine.class;
+
+ /**
+ * Acquire read lock of this store.
+ */
+ public void readLock() {
+ storeLock.readLock().lock();
+ }
+
+ /**
+ * Release read lock of this store.
+ */
+ public void readUnlock() {
+ storeLock.readLock().unlock();
+ }
+
+ /**
+ * Acquire write lock of this store.
+ */
+ public void writeLock() {
+ storeLock.writeLock().lock();
+ }
+
+ /**
+ * Release write lock of this store.
+ */
+ public void writeUnlock() {
+ storeLock.writeLock().unlock();
+ }
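To make the locking contract in the class comment concrete, a minimal usage sketch follows; the method body is a placeholder, only readLock()/readUnlock() come from this patch (the HStore changes earlier in this diff use exactly this pattern).

// Sketch of the shared-lock pattern upper layers are expected to follow.
void exampleReadPath(StoreEngine<?, ?, ?, ?> engine) {
  engine.readLock();
  try {
    // look at the component store files here, e.g. collect scanners or check for split
  } finally {
    engine.readUnlock();
  }
}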
/**
* @return Compaction policy to use.
@@ -80,6 +175,11 @@ public StoreFlusher getStoreFlusher() {
return this.storeFlusher;
}
+ private StoreFileTracker createStoreFileTracker(Configuration conf, HStore store) {
+ return StoreFileTrackerFactory.create(conf, store.isPrimaryReplicaStore(),
+ store.getStoreContext());
+ }
+
/**
* @param filesCompacting Files currently compacting
* @return whether a compaction selection is possible
@@ -87,8 +187,8 @@ public StoreFlusher getStoreFlusher() {
public abstract boolean needsCompaction(List filesCompacting);
/**
- * Creates an instance of a compaction context specific to this engine.
- * Doesn't actually select or start a compaction. See CompactionContext class comment.
+ * Creates an instance of a compaction context specific to this engine. Doesn't actually select or
+ * start a compaction. See CompactionContext class comment.
* @return New CompactionContext object.
*/
public abstract CompactionContext createCompaction() throws IOException;
@@ -96,36 +196,366 @@ public StoreFlusher getStoreFlusher() {
/**
* Create the StoreEngine's components.
*/
- protected abstract void createComponents(
- Configuration conf, HStore store, CellComparator cellComparator) throws IOException;
+ protected abstract void createComponents(Configuration conf, HStore store,
+ CellComparator cellComparator) throws IOException;
- private void createComponentsOnce(
- Configuration conf, HStore store, CellComparator cellComparator) throws IOException {
- assert compactor == null && compactionPolicy == null
- && storeFileManager == null && storeFlusher == null;
+ protected final void createComponentsOnce(Configuration conf, HStore store,
+ CellComparator cellComparator) throws IOException {
+ assert compactor == null && compactionPolicy == null && storeFileManager == null &&
+ storeFlusher == null && storeFileTracker == null;
createComponents(conf, store, cellComparator);
- assert compactor != null && compactionPolicy != null
- && storeFileManager != null && storeFlusher != null;
+ this.conf = conf;
+ this.ctx = store.getStoreContext();
+ this.coprocessorHost = store.getHRegion().getCoprocessorHost();
+ this.openStoreFileThreadPoolCreator = store.getHRegion()::getStoreFileOpenAndCloseThreadPool;
+ this.storeFileTracker = createStoreFileTracker(conf, store);
+ assert compactor != null && compactionPolicy != null && storeFileManager != null &&
+ storeFlusher != null && storeFileTracker != null;
+ }
+
+ /**
+ * Create a writer for writing new store files.
+ * @return Writer for a new StoreFile
+ */
+ public StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws IOException {
+ return storeFileTracker.createWriter(params);
+ }
+
+ public HStoreFile createStoreFileAndReader(Path p) throws IOException {
+ StoreFileInfo info = new StoreFileInfo(conf, ctx.getRegionFileSystem().getFileSystem(), p,
+ ctx.isPrimaryReplicaStore());
+ return createStoreFileAndReader(info);
+ }
+
+ public HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException {
+ info.setRegionCoprocessorHost(coprocessorHost);
+ HStoreFile storeFile =
+ new HStoreFile(info, ctx.getFamily().getBloomFilterType(), ctx.getCacheConf());
+ storeFile.initReader();
+ return storeFile;
+ }
+
+ /**
+ * Validates a store file by opening and closing it. In HFileV2 this should not be an expensive
+ * operation.
+ * @param path the path to the store file
+ */
+ public void validateStoreFile(Path path) throws IOException {
+ HStoreFile storeFile = null;
+ try {
+ storeFile = createStoreFileAndReader(path);
+ } catch (IOException e) {
+ LOG.error("Failed to open store file : {}, keeping it in tmp location", path, e);
+ throw e;
+ } finally {
+ if (storeFile != null) {
+ storeFile.closeStoreFile(false);
+ }
+ }
+ }
+
+ private List openStoreFiles(Collection files, boolean warmup)
+ throws IOException {
+ if (CollectionUtils.isEmpty(files)) {
+ return Collections.emptyList();
+ }
+ // initialize the thread pool for opening store files in parallel..
+ ExecutorService storeFileOpenerThreadPool =
+ openStoreFileThreadPoolCreator.apply("StoreFileOpener-" +
+ ctx.getRegionInfo().getEncodedName() + "-" + ctx.getFamily().getNameAsString());
+ CompletionService completionService =
+ new ExecutorCompletionService<>(storeFileOpenerThreadPool);
+
+ int totalValidStoreFile = 0;
+ for (StoreFileInfo storeFileInfo : files) {
+ // The StoreFileInfo will carry store configuration down to HFile; we need to set it to
+ // our store's CompoundConfiguration here.
+ storeFileInfo.setConf(conf);
+ // open each store file in parallel
+ completionService.submit(() -> createStoreFileAndReader(storeFileInfo));
+ totalValidStoreFile++;
+ }
+
+ Set compactedStoreFiles = new HashSet<>();
+ ArrayList results = new ArrayList<>(files.size());
+ IOException ioe = null;
+ try {
+ for (int i = 0; i < totalValidStoreFile; i++) {
+ try {
+ HStoreFile storeFile = completionService.take().get();
+ if (storeFile != null) {
+ LOG.debug("loaded {}", storeFile);
+ results.add(storeFile);
+ compactedStoreFiles.addAll(storeFile.getCompactedStoreFiles());
+ }
+ } catch (InterruptedException e) {
+ if (ioe == null) {
+ ioe = new InterruptedIOException(e.getMessage());
+ }
+ } catch (ExecutionException e) {
+ if (ioe == null) {
+ ioe = new IOException(e.getCause());
+ }
+ }
+ }
+ } finally {
+ storeFileOpenerThreadPool.shutdownNow();
+ }
+ if (ioe != null) {
+ // close StoreFile readers
+ boolean evictOnClose =
+ ctx.getCacheConf() != null ? ctx.getCacheConf().shouldEvictOnClose() : true;
+ for (HStoreFile file : results) {
+ try {
+ if (file != null) {
+ file.closeStoreFile(evictOnClose);
+ }
+ } catch (IOException e) {
+ LOG.warn("Could not close store file {}", file, e);
+ }
+ }
+ throw ioe;
+ }
+
+ // Should not archive the compacted store files during region warmup. See HBASE-22163.
+ if (!warmup) {
+ // Remove the compacted files from result
+ List filesToRemove = new ArrayList<>(compactedStoreFiles.size());
+ for (HStoreFile storeFile : results) {
+ if (compactedStoreFiles.contains(storeFile.getPath().getName())) {
+ LOG.warn("Clearing the compacted storefile {} from {}", storeFile, this);
+ storeFile.getReader().close(
+ storeFile.getCacheConf() != null ? storeFile.getCacheConf().shouldEvictOnClose() :
+ true);
+ filesToRemove.add(storeFile);
+ }
+ }
+ results.removeAll(filesToRemove);
+ if (!filesToRemove.isEmpty() && ctx.isPrimaryReplicaStore()) {
+ LOG.debug("Moving the files {} to archive", filesToRemove);
+ ctx.getRegionFileSystem().removeStoreFiles(ctx.getFamily().getNameAsString(),
+ filesToRemove);
+ }
+ }
+
+ return results;
+ }
+
+ public void initialize(boolean warmup) throws IOException {
+ List fileInfos = storeFileTracker.load();
+ List files = openStoreFiles(fileInfos, warmup);
+ storeFileManager.loadFiles(files);
+ }
+
+ public void refreshStoreFiles() throws IOException {
+ List fileInfos = storeFileTracker.load();
+ refreshStoreFilesInternal(fileInfos);
+ }
+
+ public void refreshStoreFiles(Collection newFiles) throws IOException {
+ List storeFiles = new ArrayList<>(newFiles.size());
+ for (String file : newFiles) {
+ storeFiles
+ .add(ctx.getRegionFileSystem().getStoreFileInfo(ctx.getFamily().getNameAsString(), file));
+ }
+ refreshStoreFilesInternal(storeFiles);
+ }
+
+ /**
+ * Checks the underlying store files, opens the files that have not been opened yet, and removes
+ * the store file readers for store files that are no longer available. Mainly used by secondary
+ * region replicas to keep up to date with the primary region files.
+ */
+ private void refreshStoreFilesInternal(Collection newFiles) throws IOException {
+ Collection currentFiles = storeFileManager.getStorefiles();
+ Collection compactedFiles = storeFileManager.getCompactedfiles();
+ if (currentFiles == null) {
+ currentFiles = Collections.emptySet();
+ }
+ if (newFiles == null) {
+ newFiles = Collections.emptySet();
+ }
+ if (compactedFiles == null) {
+ compactedFiles = Collections.emptySet();
+ }
+
+ HashMap currentFilesSet = new HashMap<>(currentFiles.size());
+ for (HStoreFile sf : currentFiles) {
+ currentFilesSet.put(sf.getFileInfo(), sf);
+ }
+ HashMap compactedFilesSet = new HashMap<>(compactedFiles.size());
+ for (HStoreFile sf : compactedFiles) {
+ compactedFilesSet.put(sf.getFileInfo(), sf);
+ }
+
+ Set newFilesSet = new HashSet(newFiles);
+ // Exclude the files that have already been compacted
+ newFilesSet = Sets.difference(newFilesSet, compactedFilesSet.keySet());
+ Set toBeAddedFiles = Sets.difference(newFilesSet, currentFilesSet.keySet());
+ Set toBeRemovedFiles = Sets.difference(currentFilesSet.keySet(), newFilesSet);
+
+ if (toBeAddedFiles.isEmpty() && toBeRemovedFiles.isEmpty()) {
+ return;
+ }
+
+ LOG.info("Refreshing store files for " + this + " files to add: " + toBeAddedFiles +
+ " files to remove: " + toBeRemovedFiles);
+
+ Set toBeRemovedStoreFiles = new HashSet<>(toBeRemovedFiles.size());
+ for (StoreFileInfo sfi : toBeRemovedFiles) {
+ toBeRemovedStoreFiles.add(currentFilesSet.get(sfi));
+ }
+
+ // try to open the files
+ List openedFiles = openStoreFiles(toBeAddedFiles, false);
+
+ // propagate the file changes to the underlying store file manager
+ replaceStoreFiles(toBeRemovedStoreFiles, openedFiles); // won't throw an exception
+ }
+
+ /**
+ * Commit the given {@code files}.
+ *
+ * We will move the files into the data directory and open them.
+ * @param files the files to commit
+ * @param validate whether to validate the store files
+ * @return the committed store files
+ */
+ public List commitStoreFiles(List files, boolean validate) throws IOException {
+ List committedFiles = new ArrayList<>(files.size());
+ HRegionFileSystem hfs = ctx.getRegionFileSystem();
+ String familyName = ctx.getFamily().getNameAsString();
+ Path storeDir = hfs.getStoreDir(familyName);
+ for (Path file : files) {
+ try {
+ if (validate) {
+ validateStoreFile(file);
+ }
+ Path committedPath;
+ // As we want to support writing to data directory directly, here we need to check whether
+ // the store file is already in the right place
+ if (file.getParent() != null && file.getParent().equals(storeDir)) {
+ // already in the right place, skip renaming
+ committedPath = file;
+ } else {
+ // Write-out finished successfully, move into the right spot
+ committedPath = hfs.commitStoreFile(familyName, file);
+ }
+ HStoreFile sf = createStoreFileAndReader(committedPath);
+ committedFiles.add(sf);
+ } catch (IOException e) {
+ LOG.error("Failed to commit store file {}", file, e);
+ // Try to delete the files we have committed before.
+ // It is OK to fail when deleting as leaving the file there does not cause any data
+ // corruption problem. It just introduces some duplicated data which may impact read
+ // performance a little when reading before compaction.
+ for (HStoreFile sf : committedFiles) {
+ Path pathToDelete = sf.getPath();
+ try {
+ sf.deleteStoreFile();
+ } catch (IOException deleteEx) {
+ LOG.warn(HBaseMarkers.FATAL, "Failed to delete committed store file {}", pathToDelete,
+ deleteEx);
+ }
+ }
+ throw new IOException("Failed to commit the flush", e);
+ }
+ }
+ return committedFiles;
+ }
+
+ @FunctionalInterface
+ public interface IOExceptionRunnable {
+ void run() throws IOException;
+ }
+
+ /**
+ * Add the store files to the store file manager, and also record them in the store file tracker.
+ *
+ * The {@code actionAfterAdding} will be executed after the insertion into the store file manager,
+ * under the lock protection. Usually this is for clearing the memstore snapshot.
+ */
+ public void addStoreFiles(Collection storeFiles,
+ IOExceptionRunnable actionAfterAdding) throws IOException {
+ storeFileTracker.add(StoreUtils.toStoreFileInfo(storeFiles));
+ writeLock();
+ try {
+ storeFileManager.insertNewFiles(storeFiles);
+ actionAfterAdding.run();
+ } finally {
+ // We need the lock, as long as we are updating the storeFiles
+ // or changing the memstore. Let us release it before calling
+ // notifyChangeReadersObservers. See HBASE-4485 for a possible
+ // deadlock scenario that could have happened if continue to hold
+ // the lock.
+ writeUnlock();
+ }
+ }
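A compact sketch of how this method is meant to be used, mirroring the completeFlush change in HStore earlier in this patch; the engine, memstore, temp file list and snapshot id are assumed to be supplied by the caller, and imports are omitted.

// Flush-commit sketch: commit the flushed temp files, then insert them into SFM/SFT and
// clear the memstore snapshot under the same write lock.
boolean commitFlushSketch(StoreEngine<?, ?, ?, ?> engine, MemStore memstore,
    List<Path> tempFiles, long snapshotId) throws IOException {
  List<HStoreFile> committed = engine.commitStoreFiles(tempFiles, false);
  engine.addStoreFiles(committed,
    snapshotId > 0 ? () -> memstore.clearSnapshot(snapshotId) : () -> {
    });
  return !committed.isEmpty();
}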
+
+ public void replaceStoreFiles(Collection compactedFiles,
+ Collection newFiles) throws IOException {
+ storeFileTracker.replace(StoreUtils.toStoreFileInfo(compactedFiles),
+ StoreUtils.toStoreFileInfo(newFiles));
+ writeLock();
+ try {
+ storeFileManager.addCompactionResults(compactedFiles, newFiles);
+ } finally {
+ writeUnlock();
+ }
+ }
+
+ public void removeCompactedFiles(Collection compactedFiles) {
+ writeLock();
+ try {
+ storeFileManager.removeCompactedFiles(compactedFiles);
+ } finally {
+ writeUnlock();
+ }
}
/**
* Create the StoreEngine configured for the given Store.
- * @param store The store. An unfortunate dependency needed due to it
- * being passed to coprocessors via the compactor.
+ * @param store The store. An unfortunate dependency needed due to it being passed to coprocessors
+ * via the compactor.
* @param conf Store configuration.
* @param cellComparator CellComparator for storeFileManager.
* @return StoreEngine to use.
*/
- public static StoreEngine<?, ?, ?, ?> create(
- HStore store, Configuration conf, CellComparator cellComparator) throws IOException {
+ public static StoreEngine<?, ?, ?, ?> create(HStore store, Configuration conf,
+ CellComparator cellComparator) throws IOException {
String className = conf.get(STORE_ENGINE_CLASS_KEY, DEFAULT_STORE_ENGINE_CLASS.getName());
try {
- StoreEngine<?,?,?,?> se = ReflectionUtils.instantiateWithCustomCtor(
- className, new Class[] { }, new Object[] { });
+ StoreEngine<?, ?, ?, ?> se =
+ ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {}, new Object[] {});
se.createComponentsOnce(conf, store, cellComparator);
return se;
} catch (Exception e) {
throw new IOException("Unable to load configured store engine '" + className + "'", e);
}
}
+
+ /**
+ * Whether the implementation of the used storefile tracker requires you to write to the temp
+ * directory first, i.e., it does not allow broken store files under the actual data directory.
+ */
+ public boolean requireWritingToTmpDirFirst() {
+ return storeFileTracker.requireWritingToTmpDirFirst();
+ }
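A tiny, hypothetical helper (not in this patch) showing the kind of decision callers can make based on this flag; the directory parameters are assumptions.

// Pick where a new store file writer should place its output: trackers that cannot
// tolerate broken files under the data directory keep the classic tmp-then-rename flow,
// others may write to the data directory directly and skip the rename on commit.
Path chooseOutputDir(StoreEngine<?, ?, ?, ?> engine, Path familyTmpDir, Path familyDataDir) {
  return engine.requireWritingToTmpDirFirst() ? familyTmpDir : familyDataDir;
}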
+
+ /**
+ * Resets the compaction writer when the new file is committed and used as the active storefile.
+ * This step is necessary for the correctness of BrokenStoreFileCleanerChore. It lets the
+ * CleanerChore know that the compaction is done and the file can be cleaned up if the compaction
+ * has failed. Currently called in
+ * @see HStore#doCompaction(CompactionRequestImpl, Collection, User, long, List)
+ */
+ public void resetCompactionWriter() {
+ compactor.resetWriter();
+ }
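The expected call ordering, as doCompaction in this patch does it, can be summarized in a short sketch; the method name and parameters here are placeholders and imports are omitted.

// Commit and track the compaction result first, then reset the writer so the
// BrokenStoreFileCleanerChore may treat leftover, untracked outputs of a failed
// compaction as garbage.
void finishCompactionSketch(StoreEngine<?, ?, ?, ?> engine,
    Collection<HStoreFile> compactedFiles, Collection<HStoreFile> newFiles) throws IOException {
  engine.replaceStoreFiles(compactedFiles, newFiles);
  engine.resetCompactionWriter();
}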
+
+ @RestrictedApi(explanation = "Should only be called in TestHStore", link = "",
+ allowedOnPath = ".*/TestHStore.java")
+ ReadWriteLock getLock() {
+ return storeLock;
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
index 27127f3a6c64..a40b209c6ebb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
@@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import com.google.errorprone.annotations.RestrictedApi;
import java.io.IOException;
import java.util.Collection;
import java.util.Comparator;
@@ -49,12 +50,16 @@ public interface StoreFileManager {
* Loads the initial store files into empty StoreFileManager.
* @param storeFiles The files to load.
*/
+ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "",
+ allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)")
void loadFiles(List storeFiles);
/**
* Adds new files, either for from MemStore flush or bulk insert, into the structure.
* @param sfs New store files.
*/
+ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "",
+ allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)")
void insertNewFiles(Collection sfs);
/**
@@ -62,12 +67,16 @@ public interface StoreFileManager {
* @param compactedFiles The input files for the compaction.
* @param results The resulting files for the compaction.
*/
+ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "",
+ allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)")
void addCompactionResults(Collection compactedFiles, Collection results);
/**
* Remove the compacted files
* @param compactedFiles the list of compacted files
*/
+ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "",
+ allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)")
void removeCompactedFiles(Collection compactedFiles);
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
index 1064b6c70547..58031288f751 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
@@ -70,10 +70,17 @@ protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum,
writer.close();
}
+ protected final StoreFileWriter createWriter(MemStoreSnapshot snapshot, boolean alwaysIncludesTag)
+ throws IOException {
+ return store.getStoreEngine()
+ .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(snapshot.getCellsCount())
+ .compression(store.getColumnFamilyDescriptor().getCompressionType()).isCompaction(false)
+ .includeMVCCReadpoint(true).includesTag(alwaysIncludesTag || snapshot.isTagsPresent())
+ .shouldDropBehind(false));
+ }
/**
* Creates the scanner for flushing snapshot. Also calls coprocessors.
- * @param snapshotScanners
* @return The scanner; null if coprocessor is canceling the flush.
*/
protected final InternalScanner createScanner(List snapshotScanners,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 454b244fb512..10a9330f8326 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -20,10 +20,13 @@
import java.io.IOException;
import java.util.Collection;
+import java.util.List;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.OptionalLong;
-
+import java.util.function.Predicate;
+import java.util.function.ToLongFunction;
+import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
@@ -42,10 +45,13 @@
* Utility functions for region server storage layer.
*/
@InterfaceAudience.Private
-public class StoreUtils {
+public final class StoreUtils {
private static final Logger LOG = LoggerFactory.getLogger(StoreUtils.class);
+ private StoreUtils() {
+ }
+
/**
* Creates a deterministic hash code for store file collection.
*/
@@ -171,4 +177,31 @@ public static Configuration createStoreConfiguration(Configuration conf, TableDe
return new CompoundConfiguration().add(conf).addBytesMap(td.getValues())
.addStringMap(cfd.getConfiguration()).addBytesMap(cfd.getValues());
}
+
+ public static List<StoreFileInfo> toStoreFileInfo(Collection<HStoreFile> storefiles) {
+ return storefiles.stream().map(HStoreFile::getFileInfo).collect(Collectors.toList());
+ }
+
+ public static long getTotalUncompressedBytes(List<HStoreFile> files) {
+ return files.stream()
+ .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::getTotalUncompressedBytes))
+ .sum();
+ }
+
+ public static long getStorefilesSize(Collection<HStoreFile> files,
+ Predicate<HStoreFile> predicate) {
+ return files.stream().filter(predicate)
+ .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::length)).sum();
+ }
+
+ public static long getStorefileFieldSize(HStoreFile file, ToLongFunction<StoreFileReader> f) {
+ if (file == null) {
+ return 0L;
+ }
+ StoreFileReader reader = file.getReader();
+ if (reader == null) {
+ return 0L;
+ }
+ return f.applyAsLong(reader);
+ }
}
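A short usage sketch for the new StoreUtils helpers. The file list is assumed to come from the caller, and the reader-based predicate is only an example; the wrapper class is not part of the patch.

```java
import java.util.List;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.StoreUtils;

final class StoreUtilsSketch {
  static String summarize(List<HStoreFile> files) {
    // Total uncompressed bytes across all readers (files without a reader count as 0).
    long uncompressed = StoreUtils.getTotalUncompressedBytes(files);
    // On-disk size, restricted to files that currently have an open reader.
    long readableSize = StoreUtils.getStorefilesSize(files, sf -> sf.getReader() != null);
    // Convert to StoreFileInfo, e.g. for code that works at the file-info level.
    List<StoreFileInfo> infos = StoreUtils.toStoreFileInfo(files);
    return infos.size() + " files, " + uncompressed + " uncompressed bytes, "
        + readableSize + " bytes readable";
  }
}
```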
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java
index fc0598d89ac0..a4e943ac8b04 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java
@@ -58,7 +58,7 @@ public void setNoStripeMetadata() {
}
@Override
- protected Collection<StoreFileWriter> writers() {
+ public Collection<StoreFileWriter> writers() {
return existingWriters;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
index 14863a69a9b1..bfb3f649ff27 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
@@ -20,20 +20,19 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
index 1560aef5f6b3..f8183b7645a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
@@ -70,7 +70,7 @@ public List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum
StripeMultiFileWriter mw = null;
try {
mw = req.createWriter(); // Writer according to the policy.
- StripeMultiFileWriter.WriterFactory factory = createWriterFactory(cellsCount);
+ StripeMultiFileWriter.WriterFactory factory = createWriterFactory(snapshot);
StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null;
mw.init(storeScanner, factory);
@@ -98,13 +98,12 @@ public List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum
return result;
}
- private StripeMultiFileWriter.WriterFactory createWriterFactory(final long kvCount) {
+ private StripeMultiFileWriter.WriterFactory createWriterFactory(MemStoreSnapshot snapshot) {
return new StripeMultiFileWriter.WriterFactory() {
@Override
public StoreFileWriter createWriter() throws IOException {
- StoreFileWriter writer = store.createWriterInTmp(kvCount,
- store.getColumnFamilyDescriptor().getCompressionType(), false, true, true, false);
- return writer;
+ // XXX: it used to always pass true for includesTag, re-consider?
+ return StripeStoreFlusher.this.createWriter(snapshot, true);
}
};
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java
index 42841bfee531..19b7a98627e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java
@@ -51,13 +51,14 @@ protected void initMultiWriter(AbstractMultiFileWriter writer, InternalScanner s
WriterFactory writerFactory = new WriterFactory() {
@Override
public StoreFileWriter createWriter() throws IOException {
- return createTmpWriter(fd, shouldDropBehind, major);
+ return AbstractMultiOutputCompactor.this.createWriter(fd, shouldDropBehind, major);
}
@Override
public StoreFileWriter createWriterWithStoragePolicy(String fileStoragePolicy)
- throws IOException {
- return createTmpWriter(fd, shouldDropBehind, fileStoragePolicy, major);
+ throws IOException {
+ return AbstractMultiOutputCompactor.this.createWriter(fd, shouldDropBehind,
+ fileStoragePolicy, major);
}
};
// Prepare multi-writer, and perform the compaction using scanner and writer.
@@ -67,7 +68,7 @@ public StoreFileWriter createWriterWithStoragePolicy(String fileStoragePolicy)
}
@Override
- protected void abortWriter(T writer) throws IOException {
+ protected void abortWriter() throws IOException {
FileSystem fs = store.getFileSystem();
for (Path leftoverFile : writer.abortWriters()) {
try {
@@ -78,5 +79,7 @@ protected void abortWriter(T writer) throws IOException {
e);
}
}
+ // This step signals that the target file is no longer being written and can be cleaned up.
+ writer = null;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index e524f7dfd5fd..0ee7d349e4c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -25,10 +25,12 @@
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
-
+import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
@@ -38,7 +40,9 @@
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileInfo;
+import org.apache.hadoop.hbase.regionserver.AbstractMultiFileWriter;
import org.apache.hadoop.hbase.regionserver.CellSink;
+import org.apache.hadoop.hbase.regionserver.CreateStoreFileWriterParams;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -61,6 +65,7 @@
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
/**
@@ -91,6 +96,8 @@ public abstract class Compactor {
private final boolean dropCacheMajor;
private final boolean dropCacheMinor;
+ protected T writer = null;
+
//TODO: depending on Store is not good but, realistically, all compactors currently do.
Compactor(Configuration conf, HStore store) {
this.conf = conf;
@@ -261,29 +268,32 @@ public InternalScanner createScanner(ScanInfo scanInfo, List s
}
};
+ protected final CreateStoreFileWriterParams createParams(FileDetails fd, boolean shouldDropBehind,
+ boolean major) {
+ return CreateStoreFileWriterParams.create().maxKeyCount(fd.maxKeyCount)
+ .compression(major ? majorCompactionCompression : minorCompactionCompression)
+ .isCompaction(true).includeMVCCReadpoint(fd.maxMVCCReadpoint > 0)
+ .includesTag(fd.maxTagsLength > 0).shouldDropBehind(shouldDropBehind)
+ .totalCompactedFilesSize(fd.totalCompactedFilesSize);
+ }
+
/**
- * Creates a writer for a new file in a temporary directory.
+ * Creates a writer for a new file.
* @param fd The file details.
- * @return Writer for a new StoreFile in the tmp dir.
+ * @return Writer for a new StoreFile
* @throws IOException if creation failed
*/
- protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind, boolean major)
- throws IOException {
+ protected final StoreFileWriter createWriter(FileDetails fd, boolean shouldDropBehind,
+ boolean major) throws IOException {
// When all MVCC readpoints are 0, don't write them.
// See HBASE-8166, HBASE-12600, and HBASE-13389.
- return store.createWriterInTmp(fd.maxKeyCount,
- major ? majorCompactionCompression : minorCompactionCompression,
- true, fd.maxMVCCReadpoint > 0,
- fd.maxTagsLength > 0, shouldDropBehind, fd.totalCompactedFilesSize,
- HConstants.EMPTY_STRING);
+ return store.getStoreEngine().createWriter(createParams(fd, shouldDropBehind, major));
}
- protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind,
- String fileStoragePolicy, boolean major) throws IOException {
- return store.createWriterInTmp(fd.maxKeyCount,
- major ? majorCompactionCompression : minorCompactionCompression,
- true, fd.maxMVCCReadpoint > 0,
- fd.maxTagsLength > 0, shouldDropBehind, fd.totalCompactedFilesSize, fileStoragePolicy);
+ protected final StoreFileWriter createWriter(FileDetails fd, boolean shouldDropBehind,
+ String fileStoragePolicy, boolean major) throws IOException {
+ return store.getStoreEngine()
+ .createWriter(createParams(fd, shouldDropBehind, major).fileStoragePolicy(fileStoragePolicy));
}
private ScanInfo preCompactScannerOpen(CompactionRequestImpl request, ScanType scanType,
@@ -320,7 +330,6 @@ protected final List compact(final CompactionRequestImpl request,
// Find the smallest read point across all the Scanners.
long smallestReadPoint = getSmallestReadPoint();
- T writer = null;
boolean dropCache;
if (request.isMajor() || request.isAllFiles()) {
dropCache = this.dropCacheMajor;
@@ -344,8 +353,13 @@ protected final List compact(final CompactionRequestImpl request,
smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
cleanSeqId = true;
}
+ if (writer != null) {
+ LOG.warn("Writer exists when it should not: " + getCompactionTargets().stream()
+ .map(n -> n.toString())
+ .collect(Collectors.joining(", ", "{ ", " }")));
+ }
writer = sinkFactory.createWriter(scanner, fd, dropCache, request.isMajor());
- finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId,
+ finished = performCompaction(fd, scanner, smallestReadPoint, cleanSeqId,
throughputController, request.isAllFiles(), request.getFiles().size());
if (!finished) {
throw new InterruptedIOException("Aborting compaction of store " + store + " in region "
@@ -365,24 +379,23 @@ protected final List compact(final CompactionRequestImpl request,
Closeables.close(scanner, true);
}
if (!finished && writer != null) {
- abortWriter(writer);
+ abortWriter();
}
}
assert finished : "We should have exited the method on all error paths";
assert writer != null : "Writer should be non-null if no error";
- return commitWriter(writer, fd, request);
+ return commitWriter(fd, request);
}
- protected abstract List<Path> commitWriter(T writer, FileDetails fd,
+ protected abstract List<Path> commitWriter(FileDetails fd,
CompactionRequestImpl request) throws IOException;
- protected abstract void abortWriter(T writer) throws IOException;
+ protected abstract void abortWriter() throws IOException;
/**
* Performs the compaction.
* @param fd FileDetails of cell sink writer
* @param scanner Where to read from.
- * @param writer Where to write to.
* @param smallestReadPoint Smallest read point.
* @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <=
* smallestReadPoint
@@ -390,7 +403,7 @@ protected abstract List commitWriter(T writer, FileDetails fd,
* @param numofFilesToCompact the number of files to compact
* @return Whether compaction ended; false if it was interrupted for some reason.
*/
- protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer,
+ protected boolean performCompaction(FileDetails fd, InternalScanner scanner,
long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController,
boolean major, int numofFilesToCompact) throws IOException {
assert writer instanceof ShipperListener;
@@ -533,4 +546,24 @@ protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
return new StoreScanner(store, scanInfo, scanners, smallestReadPoint, earliestPutTs,
dropDeletesFromRow, dropDeletesToRow);
}
+
+ public List<Path> getCompactionTargets() {
+ if (writer == null) {
+ return Collections.emptyList();
+ }
+ synchronized (writer) {
+ if (writer instanceof StoreFileWriter) {
+ return Arrays.asList(((StoreFileWriter) writer).getPath());
+ }
+ return ((AbstractMultiFileWriter) writer).writers().stream().map(sfw -> sfw.getPath())
+ .collect(Collectors.toList());
+ }
+ }
+
+ /**
+ * Reset the writer after the new store files have been successfully added.
+ */
+ public void resetWriter() {
+ writer = null;
+ }
}
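The new getCompactionTargets()/resetWriter() pair lets other components see which files a compaction is currently writing. Below is a hedged sketch of a consumer; the cleaner-style check is illustrative and not part of the patch.

```java
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.compactions.Compactor;

final class CompactionTargetSketch {
  // Returns true if the candidate file is currently being written by the given compactor.
  // getCompactionTargets() is empty whenever no compaction writer is open.
  static boolean isCompactionTarget(Compactor<?> compactor, Path candidate) {
    Set<Path> inFlight = new HashSet<>(compactor.getCompactionTargets());
    return inFlight.contains(candidate);
  }
}
```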
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
index fd5433082903..43e037c5e702 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
@@ -79,8 +79,10 @@ public DateTieredMultiFileWriter createWriter(InternalScanner scanner, FileDetai
}
@Override
- protected List<Path> commitWriter(DateTieredMultiFileWriter writer, FileDetails fd,
+ protected List<Path> commitWriter(FileDetails fd,
CompactionRequestImpl request) throws IOException {
- return writer.commitWriters(fd.maxSeqId, request.isAllFiles(), request.getFiles());
+ List<Path> pathList =
+ writer.commitWriters(fd.maxSeqId, request.isAllFiles(), request.getFiles());
+ return pathList;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
index 49d3e8ee01e7..ad2384a97ab8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java
@@ -45,14 +45,14 @@ public DefaultCompactor(Configuration conf, HStore store) {
}
private final CellSinkFactory<StoreFileWriter> writerFactory =
- new CellSinkFactory<StoreFileWriter>() {
- @Override
- public StoreFileWriter createWriter(InternalScanner scanner,
- org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
- boolean shouldDropBehind, boolean major) throws IOException {
- return createTmpWriter(fd, shouldDropBehind, major);
- }
- };
+ new CellSinkFactory<StoreFileWriter>() {
+ @Override
+ public StoreFileWriter createWriter(InternalScanner scanner,
+ org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
+ boolean shouldDropBehind, boolean major) throws IOException {
+ return DefaultCompactor.this.createWriter(fd, shouldDropBehind, major);
+ }
+ };
/**
* Do a minor/major compaction on an explicit set of storefiles from a Store.
@@ -63,7 +63,7 @@ public List compact(final CompactionRequestImpl request,
}
@Override
- protected List<Path> commitWriter(StoreFileWriter writer, FileDetails fd,
+ protected List<Path> commitWriter(FileDetails fd,
CompactionRequestImpl request) throws IOException {
List<Path> newFiles = Lists.newArrayList(writer.getPath());
writer.appendMetadata(fd.maxSeqId, request.isAllFiles(), request.getFiles());
@@ -72,12 +72,19 @@ protected List commitWriter(StoreFileWriter writer, FileDetails fd,
}
@Override
+ protected void abortWriter() throws IOException {
+ abortWriter(writer);
+ }
+
protected void abortWriter(StoreFileWriter writer) throws IOException {
Path leftoverFile = writer.getPath();
try {
writer.close();
} catch (IOException e) {
LOG.warn("Failed to close the writer after an unfinished compaction.", e);
+ } finally {
+ // This step signals that the target file is no longer being written and can be cleaned up.
+ writer = null;
}
try {
store.getFileSystem().delete(leftoverFile, false);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
index 547555e3812e..060a11b41fe6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
@@ -125,7 +125,7 @@ public StripeMultiFileWriter createWriter(InternalScanner scanner, FileDetails f
}
@Override
- protected List<Path> commitWriter(StripeMultiFileWriter writer, FileDetails fd,
+ protected List<Path> commitWriter(FileDetails fd,
CompactionRequestImpl request) throws IOException {
List<Path> newFiles = writer.commitWriters(fd.maxSeqId, request.isMajor(), request.getFiles());
assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata.";
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
new file mode 100644
index 000000000000..b1e298dbbe22
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.storefiletracker;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.hadoop.hbase.regionserver.StoreContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The default store file tracker implementation, which does not persist the store file list and
+ * relies on directory listing when loading store files.
+ */
+@InterfaceAudience.Private
+class DefaultStoreFileTracker extends StoreFileTrackerBase {
+
+ public DefaultStoreFileTracker(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) {
+ super(conf, isPrimaryReplica, ctx);
+ }
+
+ @Override
+ public List<StoreFileInfo> load() throws IOException {
+ List<StoreFileInfo> files =
+ ctx.getRegionFileSystem().getStoreFiles(ctx.getFamily().getNameAsString());
+ return files != null ? files : Collections.emptyList();
+ }
+
+ @Override
+ public boolean requireWritingToTmpDirFirst() {
+ return true;
+ }
+
+ @Override
+ protected void doAddNewStoreFiles(Collection<StoreFileInfo> newFiles) throws IOException {
+ // NOOP
+ }
+
+ @Override
+ protected void doAddCompactionResults(Collection<StoreFileInfo> compactedFiles,
+ Collection<StoreFileInfo> newFiles) throws IOException {
+ // NOOP
+ }
+
+ @Override
+ public void set(List<StoreFileInfo> files) {
+ // NOOP
+ }
+}
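requireWritingToTmpDirFirst() is how a tracker tells writer-creation code whether the legacy flow (write into a temporary directory, then rename into place) is still needed. Below is a trivial sketch of that branching; the directory arguments and helper class are hypothetical, only the flag's meaning comes from the patch.

```java
import org.apache.hadoop.fs.Path;

final class WriterLocationSketch {
  // writeToTmpFirst would come from the configured tracker's requireWritingToTmpDirFirst():
  // true for DefaultStoreFileTracker, false for trackers that persist their file list.
  static Path chooseOutputDir(boolean writeToTmpFirst, Path tmpDir, Path familyDir) {
    return writeToTmpFirst ? tmpDir : familyDir;
  }
}
```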
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
new file mode 100644
index 000000000000..8d9b66e53d2a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.storefiletracker;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.regionserver.StoreContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileEntry;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileList;
+
+/**
+ * A file-based store file tracker.
+ *
+ * With this tracking approach the store file list is persisted into a file, so new store files
+ * can be written directly to the final data directory: broken (partially written) files are
+ * simply never loaded. This greatly reduces flush and compaction time on object storages where
+ * a rename is actually a copy. It also avoids listing the directory when loading the store file
+ * list, which speeds up store opening, since listing is not a fast operation on most object
+ * storages either.
+ */
+@InterfaceAudience.Private
+class FileBasedStoreFileTracker extends StoreFileTrackerBase {
+
+ private final StoreFileListFile backedFile;
+
+ private final Map<String, StoreFileInfo> storefiles = new HashMap<>();
+
+ public FileBasedStoreFileTracker(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) {
+ super(conf, isPrimaryReplica, ctx);
+ // CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table
+ // descriptors with the SFT impl specific configs. By the time this happens, the table has no
+ // regions or stores yet, so it can't create a proper StoreContext.
+ if (ctx != null) {
+ backedFile = new StoreFileListFile(ctx);
+ } else {
+ backedFile = null;
+ }
+ }
+
+ @Override
+ public List<StoreFileInfo> load() throws IOException {
+ StoreFileList list = backedFile.load();
+ if (list == null) {
+ return Collections.emptyList();
+ }
+ FileSystem fs = ctx.getRegionFileSystem().getFileSystem();
+ List<StoreFileInfo> infos = new ArrayList<>();
+ for (StoreFileEntry entry : list.getStoreFileList()) {
+ infos.add(ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, ctx.getRegionInfo(),
+ ctx.getRegionFileSystem().getRegionInfoForFS(), ctx.getFamily().getNameAsString(),
+ new Path(ctx.getFamilyStoreDirectoryPath(), entry.getName())));
+ }
+ // In general, for the primary replica the load method is only called once during
+ // initialization, so no synchronization would be needed here. For secondary replicas the load
+ // method can be called multiple times, but no other mutating methods are ever invoked, so
+ // skipping the synchronization would also be fine.
+ // However, the Region interface has a refreshStoreFiles method which can be called by CPs, and
+ // the RefreshHFilesEndpoint example exposes refreshStoreFiles as an RPC, so for safety we still
+ // keep the synchronized block here.
+ synchronized (storefiles) {
+ for (StoreFileInfo info : infos) {
+ storefiles.put(info.getPath().getName(), info);
+ }
+ }
+ return infos;
+ }
+
+ @Override
+ public boolean requireWritingToTmpDirFirst() {
+ return false;
+ }
+
+ private StoreFileEntry toStoreFileEntry(StoreFileInfo info) {
+ return StoreFileEntry.newBuilder().setName(info.getPath().getName()).setSize(info.getSize())
+ .build();
+ }
+
+ @Override
+ protected void doAddNewStoreFiles(Collection<StoreFileInfo> newFiles) throws IOException {
+ synchronized (storefiles) {
+ StoreFileList.Builder builder = StoreFileList.newBuilder();
+ for (StoreFileInfo info : storefiles.values()) {
+ builder.addStoreFile(toStoreFileEntry(info));
+ }
+ for (StoreFileInfo info : newFiles) {
+ builder.addStoreFile(toStoreFileEntry(info));
+ }
+ backedFile.update(builder);
+ for (StoreFileInfo info : newFiles) {
+ storefiles.put(info.getPath().getName(), info);
+ }
+ }
+ }
+
+ @Override
+ protected void doAddCompactionResults(Collection<StoreFileInfo> compactedFiles,
+ Collection<StoreFileInfo> newFiles) throws IOException {
+ Set<String> compactedFileNames =
+ compactedFiles.stream().map(info -> info.getPath().getName()).collect(Collectors.toSet());
+ synchronized (storefiles) {
+ StoreFileList.Builder builder = StoreFileList.newBuilder();
+ storefiles.forEach((name, info) -> {
+ if (compactedFileNames.contains(name)) {
+ return;
+ }
+ builder.addStoreFile(toStoreFileEntry(info));
+ });
+ for (StoreFileInfo info : newFiles) {
+ builder.addStoreFile(toStoreFileEntry(info));
+ }
+ backedFile.update(builder);
+ for (String name : compactedFileNames) {
+ storefiles.remove(name);
+ }
+ for (StoreFileInfo info : newFiles) {
+ storefiles.put(info.getPath().getName(), info);
+ }
+ }
+ }
+
+ @Override
+ public void set(List<StoreFileInfo> files) throws IOException {
+ synchronized (storefiles) {
+ storefiles.clear();
+ StoreFileList.Builder builder = StoreFileList.newBuilder();
+ for (StoreFileInfo info : files) {
+ storefiles.put(info.getPath().getName(), info);
+ builder.addStoreFile(toStoreFileEntry(info));
+ }
+ backedFile.update(builder);
+ }
+ }
+}
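Every update in FileBasedStoreFileTracker rewrites the full snapshot of the store as a StoreFileList protobuf. A minimal sketch of that message being built, using only the builder calls visible above; the file names and sizes are made up, and the wrapper class is illustrative.

```java
import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileEntry;
import org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.StoreFileList;

final class StoreFileListSketch {
  // Builds the persisted snapshot: one StoreFileEntry (name + size) per live store file.
  static StoreFileList snapshot() {
    return StoreFileList.newBuilder()
        .addStoreFile(StoreFileEntry.newBuilder().setName("hfile-1").setSize(1024L).build())
        .addStoreFile(StoreFileEntry.newBuilder().setName("hfile-2").setSize(2048L).build())
        .build();
  }
}
```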
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrateStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrateStoreFileTrackerProcedure.java
new file mode 100644
index 000000000000..7cf3d1e8b5ac
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrateStoreFileTrackerProcedure.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.storefiletracker;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ModifyTableDescriptorProcedure;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Procedure for migrating StoreFileTracker information to table descriptor.
+ */
+@InterfaceAudience.Private
+public class MigrateStoreFileTrackerProcedure extends ModifyTableDescriptorProcedure {
+
+ public MigrateStoreFileTrackerProcedure() {}
+
+ public MigrateStoreFileTrackerProcedure(MasterProcedureEnv env, TableDescriptor unmodified) {
+ super(env, unmodified);
+ }
+
+ @Override
+ protected Optional