From 9ee19089774507efcd2f21e28920e7d074ffc451 Mon Sep 17 00:00:00 2001 From: Ankit Solomon Date: Thu, 10 Jul 2025 20:33:46 +0530 Subject: [PATCH 1/7] HBASE-29310 Handle Bulk Load Operations in Continuous Backup --- .../hbase/backup/BackupRestoreConstants.java | 6 ++ .../backup/PointInTimeRestoreDriver.java | 17 +++- .../backup/PointInTimeRestoreRequest.java | 12 +++ .../hbase/backup/impl/BackupAdminImpl.java | 57 ++++++++--- .../hbase/backup/impl/BackupSystemTable.java | 4 +- .../hadoop/hbase/backup/impl/BulkLoad.java | 8 +- .../TestIncrementalBackupWithContinuous.java | 94 +++++++++++++++---- .../hbase/backup/TestPointInTimeRestore.java | 19 +++- .../hadoop/hbase/tool/BulkLoadHFilesTool.java | 5 + 9 files changed, 185 insertions(+), 37 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java index e38b044d71d6..ea3f65a60bf1 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java @@ -116,6 +116,12 @@ public interface BackupRestoreConstants { "Specifies a custom backup location for Point-In-Time Recovery (PITR). " + "If provided, this location will be used exclusively instead of deriving the path from the system table."; + String OPTION_FORCE_RESTORE = "f"; + String LONG_OPTION_FORCE_RESTORE = "force"; + String OPTION_FORCE_RESTORE_DESC = + "Flag to force Point-In-Time Recovery in case there is no backup post bulkload operation. " + + "This might result in data loss as these bulkloaded files will not be part of the restored table."; + String JOB_NAME_CONF_KEY = "mapreduce.job.name"; String BACKUP_CONFIG_STRING = diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreDriver.java index abdf52f14302..617471b32645 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreDriver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreDriver.java @@ -18,8 +18,11 @@ package org.apache.hadoop.hbase.backup; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONF_CONTINUOUS_BACKUP_WAL_DIR; +import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.LONG_OPTION_FORCE_RESTORE; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.LONG_OPTION_PITR_BACKUP_PATH; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.LONG_OPTION_TO_DATETIME; +import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_RESTORE; +import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_RESTORE_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PITR_BACKUP_PATH; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PITR_BACKUP_PATH_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TO_DATETIME; @@ -69,6 +72,12 @@ protected int executeRestore(boolean check, TableName[] fromTables, TableName[] return -1; } + boolean force = cmd.hasOption(OPTION_FORCE_RESTORE); + if (force) { + LOG.debug("Found force option (-{}) in restore command, " + + "will force restore in case there is no backup post a bulkload", OPTION_FORCE_RESTORE); + } + String backupRootDir = cmd.getOptionValue(OPTION_PITR_BACKUP_PATH); 
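// Illustrative sketch, not part of the patch: with the force option parsed
// above and OPTION_FORCE_RESTORE bound to "f", a forced point-in-time restore
// would be driven the same way the tests drive it, via ToolRunner. The table
// names and the end-time value below are hypothetical:
//
//   String[] args = new String[] { "-" + OPTION_TABLE, "orders",
//     "-" + OPTION_TABLE_MAPPING, "orders_restored",
//     "-" + OPTION_TO_DATETIME, "1752144000000", "-" + OPTION_FORCE_RESTORE };
//   int ret = ToolRunner.run(conf, new PointInTimeRestoreDriver(), args);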
try (final Connection conn = ConnectionFactory.createConnection(conf); @@ -101,9 +110,10 @@ protected int executeRestore(boolean check, TableName[] fromTables, TableName[] return -5; } - PointInTimeRestoreRequest pointInTimeRestoreRequest = new PointInTimeRestoreRequest.Builder() - .withBackupRootDir(backupRootDir).withCheck(check).withFromTables(fromTables) - .withToTables(toTables).withOverwrite(isOverwrite).withToDateTime(endTime).build(); + PointInTimeRestoreRequest pointInTimeRestoreRequest = + new PointInTimeRestoreRequest.Builder().withBackupRootDir(backupRootDir).withCheck(check) + .withFromTables(fromTables).withToTables(toTables).withOverwrite(isOverwrite) + .withToDateTime(endTime).withForce(force).build(); client.pointInTimeRestore(pointInTimeRestoreRequest); } catch (Exception e) { @@ -119,6 +129,7 @@ protected void addOptions() { addOptWithArg(OPTION_TO_DATETIME, LONG_OPTION_TO_DATETIME, OPTION_TO_DATETIME_DESC); addOptWithArg(OPTION_PITR_BACKUP_PATH, LONG_OPTION_PITR_BACKUP_PATH, OPTION_PITR_BACKUP_PATH_DESC); + addOptNoArg(OPTION_FORCE_RESTORE, LONG_OPTION_FORCE_RESTORE, OPTION_FORCE_RESTORE_DESC); } public static void main(String[] args) throws Exception { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreRequest.java index f2462a1cfd18..b3c19813e3d0 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreRequest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreRequest.java @@ -32,6 +32,7 @@ public final class PointInTimeRestoreRequest { private final TableName[] toTables; private final boolean overwrite; private final long toDateTime; + private final boolean force; private PointInTimeRestoreRequest(Builder builder) { this.backupRootDir = builder.backupRootDir; @@ -40,6 +41,7 @@ private PointInTimeRestoreRequest(Builder builder) { this.toTables = builder.toTables; this.overwrite = builder.overwrite; this.toDateTime = builder.toDateTime; + this.force = builder.force; } public String getBackupRootDir() { @@ -66,6 +68,10 @@ public long getToDateTime() { return toDateTime; } + public boolean isForce() { + return force; + } + public static class Builder { private String backupRootDir; private boolean check = false; @@ -73,6 +79,7 @@ public static class Builder { private TableName[] toTables; private boolean overwrite = false; private long toDateTime; + private boolean force; public Builder withBackupRootDir(String backupRootDir) { this.backupRootDir = backupRootDir; @@ -104,6 +111,11 @@ public Builder withToDateTime(long dateTime) { return this; } + public Builder withForce(boolean force) { + this.force = force; + return this; + } + public PointInTimeRestoreRequest build() { return new PointInTimeRestoreRequest(this); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index e82d9804f9dc..520c5c285053 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -555,13 +555,14 @@ public void pointInTimeRestore(PointInTimeRestoreRequest request) throws IOExcep */ private void defaultPointInTimeRestore(PointInTimeRestoreRequest request) throws IOException { long endTime = request.getToDateTime(); + boolean isForce = 
request.isForce(); validateRequestToTime(endTime); TableName[] sTableArray = request.getFromTables(); TableName[] tTableArray = resolveTargetTables(sTableArray, request.getToTables()); // Validate PITR requirements - validatePitr(endTime, sTableArray, tTableArray); + validatePitr(endTime, sTableArray, tTableArray, isForce); // If only validation is required, log and return if (request.isCheck()) { @@ -629,8 +630,8 @@ private TableName[] resolveTargetTables(TableName[] sourceTables, TableName[] ta * @param tTableArray The target tables where the restore will be performed. * @throws IOException If PITR is not possible due to missing continuous backup or backup images. */ - private void validatePitr(long endTime, TableName[] sTableArray, TableName[] tTableArray) - throws IOException { + private void validatePitr(long endTime, TableName[] sTableArray, TableName[] tTableArray, + boolean isForce) throws IOException { try (BackupSystemTable table = new BackupSystemTable(conn)) { // Retrieve the set of tables with continuous backup enabled Map continuousBackupTables = table.getContinuousBackupTableSet(); @@ -643,7 +644,7 @@ private void validatePitr(long endTime, TableName[] sTableArray, TableName[] tTa // Ensure a valid backup and WALs exist for PITR validateBackupAvailability(sTableArray, tTableArray, endTime, continuousBackupTables, - backupInfos); + backupInfos, isForce); } } @@ -669,12 +670,12 @@ private void validateContinuousBackup(TableName[] tables, * the remaining duration up to the end time. */ private void validateBackupAvailability(TableName[] sTableArray, TableName[] tTableArray, - long endTime, Map continuousBackupTables, List backupInfos) - throws IOException { + long endTime, Map continuousBackupTables, List backupInfos, + boolean isForce) throws IOException { for (int i = 0; i < sTableArray.length; i++) { if ( !canPerformPitr(sTableArray[i], tTableArray[i], endTime, continuousBackupTables, - backupInfos) + backupInfos, isForce) ) { String errorMsg = "Could not find a valid backup and WALs for PITR for table: " + sTableArray[i].getNameAsString(); @@ -688,16 +689,16 @@ private void validateBackupAvailability(TableName[] sTableArray, TableName[] tTa * Checks whether PITR can be performed for a given source-target table pair. */ private boolean canPerformPitr(TableName stableName, TableName tTableName, long endTime, - Map continuousBackupTables, List backupInfos) { - return getValidBackupInfo(stableName, tTableName, endTime, continuousBackupTables, backupInfos) - != null; + Map continuousBackupTables, List backupInfos, boolean isForce) { + return getValidBackupInfo(stableName, tTableName, endTime, continuousBackupTables, backupInfos, + isForce) != null; } /** * Finds a valid backup for PITR that meets the required conditions. 
   */
  private BackupInfo getValidBackupInfo(TableName sTableName, TableName tTablename, long endTime,
-    Map<TableName, Long> continuousBackupTables, List<BackupInfo> backupInfos) {
+    Map<TableName, Long> continuousBackupTables, List<BackupInfo> backupInfos, boolean isForce) {
     for (BackupInfo info : backupInfos) {
       if (isValidBackupForPitr(info, sTableName, endTime, continuousBackupTables)) {
@@ -707,6 +708,10 @@ private BackupInfo getValidBackupInfo(TableName sTableName, TableName tTablename
       try {
         if (validateRequest(restoreRequest)) {
+          // check if any bulkload entry exists post this backup time and before "endtime"
+          if (!isForce) {
+            checkBulkLoadAfterBackup(conn, sTableName, info, endTime);
+          }
           return info;
         }
       } catch (IOException e) {
@@ -717,6 +722,32 @@ private BackupInfo getValidBackupInfo(TableName sTableName, TableName tTablename
     return null;
   }

+  /**
+   * Checks if any bulk load operation occurred for the specified table post last successful backup
+   * and before restore time.
+   * @param conn       Active HBase connection
+   * @param sTableName Table for which to check bulk load history
+   * @param info       Last successful backup before the target recovery time
+   * @param endTime    Target recovery time
+   * @throws IOException if a bulkload entry is found in between backup time and endtime
+   */
+  private void checkBulkLoadAfterBackup(Connection conn, TableName sTableName, BackupInfo info,
+    long endTime) throws IOException {
+    try (BackupSystemTable backupSystemTable = new BackupSystemTable(conn)) {
+      List<BulkLoad> bulkLoads =
+        backupSystemTable.readBulkloadRows(Collections.singletonList(sTableName));
+      for (BulkLoad load : bulkLoads) {
+        long lastBackupTs =
+          (info.getType() == BackupType.FULL) ? info.getStartTs() : info.getIncrCommittedWalTs();
+        if (lastBackupTs < load.getTimestamp() && load.getTimestamp() < endTime) {
+          throw new IOException("Bulk load operation detected after last successful backup for "
+            + "table: " + sTableName + ". Please use --force flag if you still want to restore but "
+            + "these bulk-loaded files will not be part of the restored table.");
+        }
+      }
+    }
+  }
+
   /**
    * Determines if the given backup is valid for PITR.
    *

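// Illustrative note, not part of the patch: checkBulkLoadAfterBackup above
// rejects a candidate backup when a bulk load falls inside the window
//
//   lastBackupTs < load.getTimestamp() < endTime
//
// where lastBackupTs is getStartTs() for a FULL backup and
// getIncrCommittedWalTs() for an INCREMENTAL one. With hypothetical
// epoch-millis values startTs = 1000, bulk-load timestamp = 1500 and
// endTime = 2000, the condition 1000 < 1500 < 2000 holds, so the restore
// fails unless isForce is set.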
@@ -745,8 +776,8 @@ private boolean isValidBackupForPitr(BackupInfo info, TableName tableName, long private void restoreTableWithWalReplay(TableName sourceTable, TableName targetTable, long endTime, Map continuousBackupTables, List backupInfos, PointInTimeRestoreRequest request) throws IOException { - BackupInfo backupInfo = - getValidBackupInfo(sourceTable, targetTable, endTime, continuousBackupTables, backupInfos); + BackupInfo backupInfo = getValidBackupInfo(sourceTable, targetTable, endTime, + continuousBackupTables, backupInfos, request.isForce()); if (backupInfo == null) { String errorMsg = "Could not find a valid backup and WALs for PITR for table: " + sourceTable.getNameAsString(); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index f5be8893047b..334238754ff2 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -457,8 +457,10 @@ public List readBulkloadRows(List tableList) throws IOExcep String path = null; String region = null; byte[] row = null; + long timestamp = 0L; for (Cell cell : res.listCells()) { row = CellUtil.cloneRow(cell); + timestamp = cell.getTimestamp(); String rowStr = Bytes.toString(row); region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr); if ( @@ -473,7 +475,7 @@ public List readBulkloadRows(List tableList) throws IOExcep path = Bytes.toString(CellUtil.cloneValue(cell)); } } - result.add(new BulkLoad(table, region, fam, path, row)); + result.add(new BulkLoad(table, region, fam, path, row, timestamp)); LOG.debug("found orig " + path + " for " + fam + " of table " + region); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java index 0f1e79c976bb..34f007cc8411 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java @@ -34,14 +34,16 @@ public class BulkLoad { private final String columnFamily; private final String hfilePath; private final byte[] rowKey; + private final long timestamp; public BulkLoad(TableName tableName, String region, String columnFamily, String hfilePath, - byte[] rowKey) { + byte[] rowKey, long timestamp) { this.tableName = tableName; this.region = region; this.columnFamily = columnFamily; this.hfilePath = hfilePath; this.rowKey = rowKey; + this.timestamp = timestamp; } public TableName getTableName() { @@ -64,6 +66,10 @@ public byte[] getRowKey() { return rowKey; } + public long getTimestamp() { + return timestamp; + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java index 79d1df645b95..55de725416cb 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.hbase.replication.regionserver.ReplicationMarkerChore.REPLICATION_MARKER_ENABLED_KEY; import static org.junit.Assert.assertEquals; import 
static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -145,35 +146,36 @@ public void testContinuousBackupWithIncrementalBackupAndBulkloadSuccess() throws conf1.setBoolean(REPLICATION_MARKER_ENABLED_KEY, true); String methodName = Thread.currentThread().getStackTrace()[1].getMethodName(); try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { - // The test starts with some data, and no bulk loaded rows. - int expectedRowCount = NB_ROWS_IN_BATCH; - assertEquals(expectedRowCount, TEST_UTIL.countRows(table1)); - assertTrue(systemTable.readBulkloadRows(List.of(table1)).isEmpty()); + int expectedRowCount = TEST_UTIL.countRows(table1); + int bulkloadRowCount = systemTable.readBulkloadRows(List.of(table1)).size(); // Bulk loads aren't tracked if the table isn't backed up yet - performBulkLoad("bulk1", methodName); + performBulkLoad("bulk1", methodName, table1); expectedRowCount += ROWS_IN_BULK_LOAD; assertEquals(expectedRowCount, TEST_UTIL.countRows(table1)); - assertEquals(0, systemTable.readBulkloadRows(List.of(table1)).size()); + assertEquals(bulkloadRowCount, systemTable.readBulkloadRows(List.of(table1)).size()); // Create a backup, bulk loads are now being tracked String backup1 = backupTables(BackupType.FULL, List.of(table1), BACKUP_ROOT_DIR, true); assertTrue(checkSucceeded(backup1)); loadTable(TEST_UTIL.getConnection().getTable(table1)); + expectedRowCount = expectedRowCount + NB_ROWS_IN_BATCH; assertEquals(expectedRowCount, TEST_UTIL.countRows(table1)); - performBulkLoad("bulk2", methodName); + performBulkLoad("bulk2", methodName, table1); expectedRowCount += ROWS_IN_BULK_LOAD; + bulkloadRowCount++; assertEquals(expectedRowCount, TEST_UTIL.countRows(table1)); - assertEquals(1, systemTable.readBulkloadRows(List.of(table1)).size()); + assertEquals(bulkloadRowCount, systemTable.readBulkloadRows(List.of(table1)).size()); // Creating an incremental backup clears the bulk loads - performBulkLoad("bulk4", methodName); - performBulkLoad("bulk5", methodName); - performBulkLoad("bulk6", methodName); + performBulkLoad("bulk4", methodName, table1); + performBulkLoad("bulk5", methodName, table1); + performBulkLoad("bulk6", methodName, table1); expectedRowCount += 3 * ROWS_IN_BULK_LOAD; + bulkloadRowCount += 3; assertEquals(expectedRowCount, TEST_UTIL.countRows(table1)); - assertEquals(4, systemTable.readBulkloadRows(List.of(table1)).size()); + assertEquals(bulkloadRowCount, systemTable.readBulkloadRows(List.of(table1)).size()); String backup2 = backupTables(BackupType.INCREMENTAL, List.of(table1), BACKUP_ROOT_DIR, true); assertTrue(checkSucceeded(backup2)); assertEquals(expectedRowCount, TEST_UTIL.countRows(table1)); @@ -181,7 +183,7 @@ public void testContinuousBackupWithIncrementalBackupAndBulkloadSuccess() throws int rowCountAfterBackup2 = expectedRowCount; // Doing another bulk load, to check that this data will disappear after a restore operation - performBulkLoad("bulk7", methodName); + performBulkLoad("bulk7", methodName, table1); expectedRowCount += ROWS_IN_BULK_LOAD; assertEquals(expectedRowCount, TEST_UTIL.countRows(table1)); List bulkloadsTemp = systemTable.readBulkloadRows(List.of(table1)); @@ -197,6 +199,65 @@ public void testContinuousBackupWithIncrementalBackupAndBulkloadSuccess() throws assertEquals(rowCountAfterBackup2, TEST_UTIL.countRows(table1)); List bulkLoads = systemTable.readBulkloadRows(List.of(table1)); assertEquals(3, bulkLoads.size()); 
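// Reviewer sketch, not part of the patch: the timestamp newly carried by
// BulkLoad is only exercised indirectly in this test. A direct check, assuming
// readBulkloadRows returns the rows just written for table1, could be:
//
//   performBulkLoad("bulkTs", methodName, table1);
//   for (BulkLoad load : systemTable.readBulkloadRows(List.of(table1))) {
//     assertTrue(load.getTimestamp() > 0); // cell timestamp captured at load
//   }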
+ } finally { + conf1.setBoolean(REPLICATION_MARKER_ENABLED_KEY, REPLICATION_MARKER_ENABLED_DEFAULT); + } + } + + @Test + public void testForcePITR() throws Exception { + conf1.setBoolean(REPLICATION_MARKER_ENABLED_KEY, true); + String methodName = Thread.currentThread().getStackTrace()[1].getMethodName(); + TableName tableName1 = TableName.valueOf("table_" + methodName); + TEST_UTIL.createTable(tableName1, famName); + try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { + + // The test starts with no data, and no bulk loaded rows. + int expectedRowCount = 0; + assertEquals(expectedRowCount, TEST_UTIL.countRows(tableName1)); + assertTrue(systemTable.readBulkloadRows(List.of(tableName1)).isEmpty()); + + // Create continuous backup, bulk loads are now being tracked + String backup1 = backupTables(BackupType.FULL, List.of(tableName1), BACKUP_ROOT_DIR, true); + assertTrue(checkSucceeded(backup1)); + + loadTable(TEST_UTIL.getConnection().getTable(tableName1)); + expectedRowCount = expectedRowCount + NB_ROWS_IN_BATCH; + performBulkLoad("bulkPreIncr", methodName, tableName1); + expectedRowCount += ROWS_IN_BULK_LOAD; + assertEquals(expectedRowCount, TEST_UTIL.countRows(tableName1)); + assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size()); + + loadTable(TEST_UTIL.getConnection().getTable(tableName1)); + expectedRowCount = expectedRowCount + NB_ROWS_IN_BATCH; + Thread.sleep(5000); + + // Incremental backup + String backup2 = + backupTables(BackupType.INCREMENTAL, List.of(tableName1), BACKUP_ROOT_DIR, true); + assertTrue(checkSucceeded(backup2)); + assertEquals(0, systemTable.readBulkloadRows(List.of(tableName1)).size()); + + performBulkLoad("bulkPostIncr", methodName, tableName1); + assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size()); + + loadTable(TEST_UTIL.getConnection().getTable(tableName1)); + Thread.sleep(10000); + long restoreTs = BackupUtils.getReplicationCheckpoint(TEST_UTIL.getConnection()); + + // expect restore failure due to no backup post bulkPostIncr bulkload + TableName restoredTable = TableName.valueOf("restoredTable"); + String[] args = TestPointInTimeRestore.buildPITRArgs(new TableName[] { tableName1 }, + new TableName[] { restoredTable }, restoreTs); + int ret = ToolRunner.run(conf1, new PointInTimeRestoreDriver(), args); + assertNotEquals("Restore should fail since there is one bulkload without any backup", 0, ret); + + // force restore + args = TestPointInTimeRestore.buildPITRArgs(new TableName[] { tableName1 }, + new TableName[] { restoredTable }, restoreTs, true); + ret = ToolRunner.run(conf1, new PointInTimeRestoreDriver(), args); + assertEquals("Restore should succeed", 0, ret); + } finally { conf1.setBoolean(REPLICATION_MARKER_ENABLED_KEY, REPLICATION_MARKER_ENABLED_DEFAULT); } } @@ -208,7 +269,8 @@ private void verifyTable(Table t1) throws IOException { assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN)); } - private void performBulkLoad(String keyPrefix, String testDir) throws IOException { + private void performBulkLoad(String keyPrefix, String testDir, TableName tableName) + throws IOException { FileSystem fs = TEST_UTIL.getTestFileSystem(); Path baseDirectory = TEST_UTIL.getDataTestDirOnTestFS(testDir); Path hfilePath = @@ -220,7 +282,7 @@ private void performBulkLoad(String keyPrefix, String testDir) throws IOExceptio listFiles(fs, baseDirectory, baseDirectory); Map result = - BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(table1, baseDirectory); + 
BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tableName, baseDirectory); assertFalse(result.isEmpty()); } @@ -246,7 +308,7 @@ private static Set listFiles(final FileSystem fs, final Path root, final protected static void loadTable(Table table) throws Exception { Put p; // 100 + 1 row to t1_syncup for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { - p = new Put(Bytes.toBytes("row" + i)); + p = new Put(Bytes.toBytes("rowLoad" + i)); p.addColumn(famName, qualName, Bytes.toBytes("val" + i)); table.put(p); } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java index fb37977c4eee..a27c54e2a988 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONF_CONTINUOUS_BACKUP_WAL_DIR; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_ENABLE_CONTINUOUS_BACKUP; +import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_RESTORE; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_MAPPING; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TO_DATETIME; @@ -228,15 +229,27 @@ public void testPointInTimeRestore_SuccessfulRestoreForMultipleTables() throws E getRowCount(table2), getRowCount(restoredTable2)); } - private String[] buildPITRArgs(TableName[] sourceTables, TableName[] targetTables, long endTime) { + public static String[] buildPITRArgs(TableName[] sourceTables, TableName[] targetTables, + long endTime) { + return buildPITRArgs(sourceTables, targetTables, endTime, false); + } + + public static String[] buildPITRArgs(TableName[] sourceTables, TableName[] targetTables, + long endTime, boolean force) { String sourceTableNames = Arrays.stream(sourceTables).map(TableName::getNameAsString).collect(Collectors.joining(",")); String targetTableNames = Arrays.stream(targetTables).map(TableName::getNameAsString).collect(Collectors.joining(",")); - return new String[] { "-" + OPTION_TABLE, sourceTableNames, "-" + OPTION_TABLE_MAPPING, - targetTableNames, "-" + OPTION_TO_DATETIME, String.valueOf(endTime) }; + if (force) { + return new String[] { "-" + OPTION_TABLE, sourceTableNames, "-" + OPTION_TABLE_MAPPING, + targetTableNames, "-" + OPTION_TO_DATETIME, String.valueOf(endTime), + "-" + OPTION_FORCE_RESTORE }; + } else { + return new String[] { "-" + OPTION_TABLE, sourceTableNames, "-" + OPTION_TABLE_MAPPING, + targetTableNames, "-" + OPTION_TO_DATETIME, String.valueOf(endTime) }; + } } private static String[] buildBackupArgs(String backupType, TableName[] tables, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index 98e6631e3055..7c6e9c010250 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -1195,6 +1195,11 @@ public int run(String[] args) throws Exception { public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); int ret = ToolRunner.run(conf, new 
BulkLoadHFilesTool(conf), args); + if (ret == 0) { + System.out.println("Bulk load completed successfully."); + System.out.println("IMPORTANT: Please take a backup of the table immediately if this table " + + "is part of continuous backup"); + } System.exit(ret); } From 0dbf4cb25e1e8d2d5b1e63d31de8e08437f81c8d Mon Sep 17 00:00:00 2001 From: Ankit Solomon Date: Sun, 20 Jul 2025 01:30:08 +0530 Subject: [PATCH 2/7] Removed force flag --- .../hbase/backup/BackupRestoreConstants.java | 6 ---- .../backup/PointInTimeRestoreDriver.java | 17 ++-------- .../backup/PointInTimeRestoreRequest.java | 12 ------- .../hbase/backup/impl/BackupAdminImpl.java | 34 ++++++++----------- .../hadoop/hbase/backup/impl/BulkLoad.java | 5 +-- .../TestIncrementalBackupWithContinuous.java | 8 +---- .../hbase/backup/TestPointInTimeRestore.java | 16 ++------- 7 files changed, 24 insertions(+), 74 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java index ea3f65a60bf1..e38b044d71d6 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java @@ -116,12 +116,6 @@ public interface BackupRestoreConstants { "Specifies a custom backup location for Point-In-Time Recovery (PITR). " + "If provided, this location will be used exclusively instead of deriving the path from the system table."; - String OPTION_FORCE_RESTORE = "f"; - String LONG_OPTION_FORCE_RESTORE = "force"; - String OPTION_FORCE_RESTORE_DESC = - "Flag to force Point-In-Time Recovery in case there is no backup post bulkload operation. " - + "This might result in data loss as these bulkloaded files will not be part of the restored table."; - String JOB_NAME_CONF_KEY = "mapreduce.job.name"; String BACKUP_CONFIG_STRING = diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreDriver.java index 617471b32645..abdf52f14302 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreDriver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreDriver.java @@ -18,11 +18,8 @@ package org.apache.hadoop.hbase.backup; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONF_CONTINUOUS_BACKUP_WAL_DIR; -import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.LONG_OPTION_FORCE_RESTORE; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.LONG_OPTION_PITR_BACKUP_PATH; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.LONG_OPTION_TO_DATETIME; -import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_RESTORE; -import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_RESTORE_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PITR_BACKUP_PATH; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PITR_BACKUP_PATH_DESC; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TO_DATETIME; @@ -72,12 +69,6 @@ protected int executeRestore(boolean check, TableName[] fromTables, TableName[] return -1; } - boolean force = cmd.hasOption(OPTION_FORCE_RESTORE); - if (force) { - LOG.debug("Found force option (-{}) in restore command, " - + "will force restore in case 
there is no backup post a bulkload", OPTION_FORCE_RESTORE); - } - String backupRootDir = cmd.getOptionValue(OPTION_PITR_BACKUP_PATH); try (final Connection conn = ConnectionFactory.createConnection(conf); @@ -110,10 +101,9 @@ protected int executeRestore(boolean check, TableName[] fromTables, TableName[] return -5; } - PointInTimeRestoreRequest pointInTimeRestoreRequest = - new PointInTimeRestoreRequest.Builder().withBackupRootDir(backupRootDir).withCheck(check) - .withFromTables(fromTables).withToTables(toTables).withOverwrite(isOverwrite) - .withToDateTime(endTime).withForce(force).build(); + PointInTimeRestoreRequest pointInTimeRestoreRequest = new PointInTimeRestoreRequest.Builder() + .withBackupRootDir(backupRootDir).withCheck(check).withFromTables(fromTables) + .withToTables(toTables).withOverwrite(isOverwrite).withToDateTime(endTime).build(); client.pointInTimeRestore(pointInTimeRestoreRequest); } catch (Exception e) { @@ -129,7 +119,6 @@ protected void addOptions() { addOptWithArg(OPTION_TO_DATETIME, LONG_OPTION_TO_DATETIME, OPTION_TO_DATETIME_DESC); addOptWithArg(OPTION_PITR_BACKUP_PATH, LONG_OPTION_PITR_BACKUP_PATH, OPTION_PITR_BACKUP_PATH_DESC); - addOptNoArg(OPTION_FORCE_RESTORE, LONG_OPTION_FORCE_RESTORE, OPTION_FORCE_RESTORE_DESC); } public static void main(String[] args) throws Exception { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreRequest.java index b3c19813e3d0..f2462a1cfd18 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreRequest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/PointInTimeRestoreRequest.java @@ -32,7 +32,6 @@ public final class PointInTimeRestoreRequest { private final TableName[] toTables; private final boolean overwrite; private final long toDateTime; - private final boolean force; private PointInTimeRestoreRequest(Builder builder) { this.backupRootDir = builder.backupRootDir; @@ -41,7 +40,6 @@ private PointInTimeRestoreRequest(Builder builder) { this.toTables = builder.toTables; this.overwrite = builder.overwrite; this.toDateTime = builder.toDateTime; - this.force = builder.force; } public String getBackupRootDir() { @@ -68,10 +66,6 @@ public long getToDateTime() { return toDateTime; } - public boolean isForce() { - return force; - } - public static class Builder { private String backupRootDir; private boolean check = false; @@ -79,7 +73,6 @@ public static class Builder { private TableName[] toTables; private boolean overwrite = false; private long toDateTime; - private boolean force; public Builder withBackupRootDir(String backupRootDir) { this.backupRootDir = backupRootDir; @@ -111,11 +104,6 @@ public Builder withToDateTime(long dateTime) { return this; } - public Builder withForce(boolean force) { - this.force = force; - return this; - } - public PointInTimeRestoreRequest build() { return new PointInTimeRestoreRequest(this); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index 520c5c285053..400068750416 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -555,14 +555,13 @@ public void pointInTimeRestore(PointInTimeRestoreRequest request) throws IOExcep */ private void 
defaultPointInTimeRestore(PointInTimeRestoreRequest request) throws IOException { long endTime = request.getToDateTime(); - boolean isForce = request.isForce(); validateRequestToTime(endTime); TableName[] sTableArray = request.getFromTables(); TableName[] tTableArray = resolveTargetTables(sTableArray, request.getToTables()); // Validate PITR requirements - validatePitr(endTime, sTableArray, tTableArray, isForce); + validatePitr(endTime, sTableArray, tTableArray); // If only validation is required, log and return if (request.isCheck()) { @@ -630,8 +629,8 @@ private TableName[] resolveTargetTables(TableName[] sourceTables, TableName[] ta * @param tTableArray The target tables where the restore will be performed. * @throws IOException If PITR is not possible due to missing continuous backup or backup images. */ - private void validatePitr(long endTime, TableName[] sTableArray, TableName[] tTableArray, - boolean isForce) throws IOException { + private void validatePitr(long endTime, TableName[] sTableArray, TableName[] tTableArray) + throws IOException { try (BackupSystemTable table = new BackupSystemTable(conn)) { // Retrieve the set of tables with continuous backup enabled Map continuousBackupTables = table.getContinuousBackupTableSet(); @@ -644,7 +643,7 @@ private void validatePitr(long endTime, TableName[] sTableArray, TableName[] tTa // Ensure a valid backup and WALs exist for PITR validateBackupAvailability(sTableArray, tTableArray, endTime, continuousBackupTables, - backupInfos, isForce); + backupInfos); } } @@ -670,12 +669,12 @@ private void validateContinuousBackup(TableName[] tables, * the remaining duration up to the end time. */ private void validateBackupAvailability(TableName[] sTableArray, TableName[] tTableArray, - long endTime, Map continuousBackupTables, List backupInfos, - boolean isForce) throws IOException { + long endTime, Map continuousBackupTables, List backupInfos) + throws IOException { for (int i = 0; i < sTableArray.length; i++) { if ( !canPerformPitr(sTableArray[i], tTableArray[i], endTime, continuousBackupTables, - backupInfos, isForce) + backupInfos) ) { String errorMsg = "Could not find a valid backup and WALs for PITR for table: " + sTableArray[i].getNameAsString(); @@ -689,16 +688,16 @@ private void validateBackupAvailability(TableName[] sTableArray, TableName[] tTa * Checks whether PITR can be performed for a given source-target table pair. */ private boolean canPerformPitr(TableName stableName, TableName tTableName, long endTime, - Map continuousBackupTables, List backupInfos, boolean isForce) { - return getValidBackupInfo(stableName, tTableName, endTime, continuousBackupTables, backupInfos, - isForce) != null; + Map continuousBackupTables, List backupInfos) { + return getValidBackupInfo(stableName, tTableName, endTime, continuousBackupTables, backupInfos) + != null; } /** * Finds a valid backup for PITR that meets the required conditions. 
*/ private BackupInfo getValidBackupInfo(TableName sTableName, TableName tTablename, long endTime, - Map continuousBackupTables, List backupInfos, boolean isForce) { + Map continuousBackupTables, List backupInfos) { for (BackupInfo info : backupInfos) { if (isValidBackupForPitr(info, sTableName, endTime, continuousBackupTables)) { @@ -709,9 +708,7 @@ private BackupInfo getValidBackupInfo(TableName sTableName, TableName tTablename try { if (validateRequest(restoreRequest)) { // check if any bulkload entry exists post this backup time and before "endtime" - if (!isForce) { - checkBulkLoadAfterBackup(conn, sTableName, info, endTime); - } + checkBulkLoadAfterBackup(conn, sTableName, info, endTime); return info; } } catch (IOException e) { @@ -741,8 +738,7 @@ private void checkBulkLoadAfterBackup(Connection conn, TableName sTableName, Bac (info.getType() == BackupType.FULL) ? info.getStartTs() : info.getIncrCommittedWalTs(); if (lastBackupTs < load.getTimestamp() && load.getTimestamp() < endTime) { throw new IOException("Bulk load operation detected after last successful backup for " - + "table: " + sTableName + ". Please use --force flag if you still want to restore but " - + "these bulk-loaded files will not be part of the restored table."); + + "table: " + sTableName); } } } @@ -776,8 +772,8 @@ private boolean isValidBackupForPitr(BackupInfo info, TableName tableName, long private void restoreTableWithWalReplay(TableName sourceTable, TableName targetTable, long endTime, Map continuousBackupTables, List backupInfos, PointInTimeRestoreRequest request) throws IOException { - BackupInfo backupInfo = getValidBackupInfo(sourceTable, targetTable, endTime, - continuousBackupTables, backupInfos, request.isForce()); + BackupInfo backupInfo = + getValidBackupInfo(sourceTable, targetTable, endTime, continuousBackupTables, backupInfos); if (backupInfo == null) { String errorMsg = "Could not find a valid backup and WALs for PITR for table: " + sourceTable.getNameAsString(); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java index 34f007cc8411..c5e4696185ad 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java @@ -87,13 +87,14 @@ public boolean equals(Object o) { @Override public int hashCode() { return new HashCodeBuilder().append(tableName).append(region).append(columnFamily) - .append(hfilePath).append(rowKey).toHashCode(); + .append(hfilePath).append(rowKey).append(timestamp).toHashCode(); } @Override public String toString() { return new ToStringBuilder(this, ToStringStyle.NO_CLASS_NAME_STYLE) .append("tableName", tableName).append("region", region).append("columnFamily", columnFamily) - .append("hfilePath", hfilePath).append("rowKey", rowKey).toString(); + .append("hfilePath", hfilePath).append("rowKey", rowKey).append("timestamp", timestamp) + .toString(); } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java index 55de725416cb..15839725d55f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java @@ -205,7 +205,7 @@ public void 
testContinuousBackupWithIncrementalBackupAndBulkloadSuccess() throws } @Test - public void testForcePITR() throws Exception { + public void testPitrFailureDueToMissingBackupPostBulkload() throws Exception { conf1.setBoolean(REPLICATION_MARKER_ENABLED_KEY, true); String methodName = Thread.currentThread().getStackTrace()[1].getMethodName(); TableName tableName1 = TableName.valueOf("table_" + methodName); @@ -251,12 +251,6 @@ public void testForcePITR() throws Exception { new TableName[] { restoredTable }, restoreTs); int ret = ToolRunner.run(conf1, new PointInTimeRestoreDriver(), args); assertNotEquals("Restore should fail since there is one bulkload without any backup", 0, ret); - - // force restore - args = TestPointInTimeRestore.buildPITRArgs(new TableName[] { tableName1 }, - new TableName[] { restoredTable }, restoreTs, true); - ret = ToolRunner.run(conf1, new PointInTimeRestoreDriver(), args); - assertEquals("Restore should succeed", 0, ret); } finally { conf1.setBoolean(REPLICATION_MARKER_ENABLED_KEY, REPLICATION_MARKER_ENABLED_DEFAULT); } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java index a27c54e2a988..9964f0aa7e93 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONF_CONTINUOUS_BACKUP_WAL_DIR; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_ENABLE_CONTINUOUS_BACKUP; -import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_RESTORE; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_MAPPING; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TO_DATETIME; @@ -231,25 +230,14 @@ public void testPointInTimeRestore_SuccessfulRestoreForMultipleTables() throws E public static String[] buildPITRArgs(TableName[] sourceTables, TableName[] targetTables, long endTime) { - return buildPITRArgs(sourceTables, targetTables, endTime, false); - } - - public static String[] buildPITRArgs(TableName[] sourceTables, TableName[] targetTables, - long endTime, boolean force) { String sourceTableNames = Arrays.stream(sourceTables).map(TableName::getNameAsString).collect(Collectors.joining(",")); String targetTableNames = Arrays.stream(targetTables).map(TableName::getNameAsString).collect(Collectors.joining(",")); - if (force) { - return new String[] { "-" + OPTION_TABLE, sourceTableNames, "-" + OPTION_TABLE_MAPPING, - targetTableNames, "-" + OPTION_TO_DATETIME, String.valueOf(endTime), - "-" + OPTION_FORCE_RESTORE }; - } else { - return new String[] { "-" + OPTION_TABLE, sourceTableNames, "-" + OPTION_TABLE_MAPPING, - targetTableNames, "-" + OPTION_TO_DATETIME, String.valueOf(endTime) }; - } + return new String[] { "-" + OPTION_TABLE, sourceTableNames, "-" + OPTION_TABLE_MAPPING, + targetTableNames, "-" + OPTION_TO_DATETIME, String.valueOf(endTime) }; } private static String[] buildBackupArgs(String backupType, TableName[] tables, From 48ffcff431f7f28499537e5d2178f167f31c4553 Mon Sep 17 00:00:00 2001 From: Ankit Solomon Date: Sun, 20 Jul 2025 02:03:10 +0530 Subject: [PATCH 3/7] Minor fix --- .../main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java index c5e4696185ad..1befe7c469cc 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BulkLoad.java @@ -81,7 +81,7 @@ public boolean equals(Object o) { BulkLoad that = (BulkLoad) o; return new EqualsBuilder().append(tableName, that.tableName).append(region, that.region) .append(columnFamily, that.columnFamily).append(hfilePath, that.hfilePath) - .append(rowKey, that.rowKey).isEquals(); + .append(rowKey, that.rowKey).append(timestamp, that.timestamp).isEquals(); } @Override From 8046cc2b83cc9bbe9f39cf7d71c09bb4999a6267 Mon Sep 17 00:00:00 2001 From: Ankit Solomon Date: Sun, 20 Jul 2025 10:36:59 +0530 Subject: [PATCH 4/7] Spotless fix --- .../backup/impl/AbstractPitrRestoreHandler.java | 14 +++++++------- .../hbase/backup/impl/BackupInfoAdapter.java | 8 ++++++-- .../hadoop/hbase/backup/impl/BackupManifest.java | 11 ++++++++--- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/AbstractPitrRestoreHandler.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/AbstractPitrRestoreHandler.java index 76b281b9638b..8072277bf684 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/AbstractPitrRestoreHandler.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/AbstractPitrRestoreHandler.java @@ -26,7 +26,6 @@ import static org.apache.hadoop.hbase.mapreduce.WALPlayer.IGNORE_EMPTY_FILES; import java.io.IOException; -import org.apache.hadoop.hbase.backup.BackupType; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; @@ -42,6 +41,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.PointInTimeRestoreRequest; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.util.BackupUtils; @@ -271,14 +271,14 @@ private PitrBackupMetadata getValidBackup(TableName sTableName, TableName tTable * @param endTime Target recovery time * @throws IOException if a bulkload entry is found in between backup time and endtime */ - private void checkBulkLoadAfterBackup(Connection conn, TableName sTableName, PitrBackupMetadata backup, - long endTime) throws IOException { + private void checkBulkLoadAfterBackup(Connection conn, TableName sTableName, + PitrBackupMetadata backup, long endTime) throws IOException { try (BackupSystemTable backupSystemTable = new BackupSystemTable(conn)) { - List bulkLoads = - backupSystemTable.readBulkloadRows(List.of(sTableName)); + List bulkLoads = backupSystemTable.readBulkloadRows(List.of(sTableName)); for (BulkLoad load : bulkLoads) { - long lastBackupTs = - (backup.getType() == BackupType.FULL) ? backup.getStartTs() : backup.getIncrCommittedWalTs(); + long lastBackupTs = (backup.getType() == BackupType.FULL) + ? 
backup.getStartTs() + : backup.getIncrCommittedWalTs(); if (lastBackupTs < load.getTimestamp() && load.getTimestamp() < endTime) { throw new IOException("Bulk load operation detected after last successful backup for " + "table: " + sTableName); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupInfoAdapter.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupInfoAdapter.java index df3c379f0c4b..34d812121e02 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupInfoAdapter.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupInfoAdapter.java @@ -60,8 +60,12 @@ public String getRootDir() { } @Override - public BackupType getType() { return info.getType(); } + public BackupType getType() { + return info.getType(); + } @Override - public long getIncrCommittedWalTs() { return info.getIncrCommittedWalTs(); } + public long getIncrCommittedWalTs() { + return info.getIncrCommittedWalTs(); + } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java index 9ee0c4cd2cb2..f35755d24512 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -159,7 +159,8 @@ static BackupImage fromProto(BackupProtos.BackupImage im) { ? BackupType.FULL : BackupType.INCREMENTAL; - BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs, incrCommittedWalTs); + BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs, + incrCommittedWalTs); for (BackupProtos.BackupImage img : ancestorList) { image.addAncestor(fromProto(img)); } @@ -296,9 +297,13 @@ public long getCompleteTs() { return completeTs; } - public long getIncrCommittedWalTs() { return incrCommittedWalTs; } + public long getIncrCommittedWalTs() { + return incrCommittedWalTs; + } - public void setIncrCommittedWalTs(long incrCommittedWalTs) { this.incrCommittedWalTs = incrCommittedWalTs; } + public void setIncrCommittedWalTs(long incrCommittedWalTs) { + this.incrCommittedWalTs = incrCommittedWalTs; + } private void setCompleteTs(long completeTs) { this.completeTs = completeTs; From 1796c61a538d3462b17431ec2dd0cf440a7d5f91 Mon Sep 17 00:00:00 2001 From: Ankit Solomon Date: Mon, 21 Jul 2025 15:35:02 +0530 Subject: [PATCH 5/7] Minor fix --- .../apache/hadoop/hbase/backup/TestPointInTimeRestore.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java index 343986667c67..e9a0b50abcfa 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java @@ -67,8 +67,8 @@ private static void setUpBackups() throws Exception { // Simulate a backup taken 20 days ago EnvironmentEdgeManager .injectEdge(() -> System.currentTimeMillis() - 20 * ONE_DAY_IN_MILLISECONDS); - PITRTestUtil.loadRandomData(TEST_UTIL, table1, famName, 1000); // Insert initial data into - // table1 + // Insert initial data into table1 + PITRTestUtil.loadRandomData(TEST_UTIL, table1, famName, 1000); // Perform a full backup for table1 with continuous backup enabled 
String[] args = From f642f2d2a7ff76360d60c5b6c19571e4876abd46 Mon Sep 17 00:00:00 2001 From: Ankit Solomon Date: Tue, 22 Jul 2025 15:39:37 +0530 Subject: [PATCH 6/7] Checkstyle fix --- .../hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java index 31cd93143501..86d11aeea10c 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java @@ -229,7 +229,6 @@ public void testPitrFailureDueToMissingBackupPostBulkload() throws Exception { assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size()); loadTable(TEST_UTIL.getConnection().getTable(tableName1)); - expectedRowCount = expectedRowCount + NB_ROWS_IN_BATCH; Thread.sleep(5000); // Incremental backup From 694511b11a4735cccdfd7be449ba16f967b51e09 Mon Sep 17 00:00:00 2001 From: Ankit Solomon Date: Wed, 23 Jul 2025 09:30:23 +0530 Subject: [PATCH 7/7] Removed unused variables --- .../hbase/backup/TestIncrementalBackupWithContinuous.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java index 4348c0329025..0978ff3ebef5 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java @@ -66,8 +66,6 @@ public class TestIncrementalBackupWithContinuous extends TestContinuousBackup { private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackupWithContinuous.class); - private byte[] ROW = Bytes.toBytes("row1"); - private final byte[] COLUMN = Bytes.toBytes("col"); private static final int ROWS_IN_BULK_LOAD = 100; @Test