Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.PointInTimeRestoreRequest;
import org.apache.hadoop.hbase.backup.RestoreRequest;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
Expand Down Expand Up @@ -249,8 +248,6 @@ private PitrBackupMetadata getValidBackup(TableName sTableName, TableName tTable

try {
if (backupAdmin.validateRequest(restoreRequest)) {
// check if any bulkload entry exists post this backup time and before "endtime"
checkBulkLoadAfterBackup(conn, sTableName, backup, endTime);
return backup;
}
} catch (IOException e) {
Expand All @@ -262,31 +259,6 @@ private PitrBackupMetadata getValidBackup(TableName sTableName, TableName tTable
return null;
}

/**
 * Checks if any bulk load operation occurred for the specified table post last successful backup
 * and before restore time.
 * <p>
 * A bulk load that lands between the last backup and the requested recovery time has no
 * corresponding backup image to restore from, so PITR must be rejected for that window.
 * @param conn Active HBase connection
 * @param sTableName Table for which to check bulk load history
 * @param backup Last successful backup before the target recovery time
 * @param endTime Target recovery time
 * @throws IOException if a bulkload entry is found in between backup time and endtime
 */
private void checkBulkLoadAfterBackup(Connection conn, TableName sTableName,
  PitrBackupMetadata backup, long endTime) throws IOException {
  // The cutoff timestamp depends only on the backup, not on any individual bulk load
  // entry, so compute it once up front (previously recomputed on every loop iteration).
  // For a FULL backup the start timestamp is the reference point; for an INCREMENTAL
  // backup it is the committed-WAL timestamp of the increment.
  long lastBackupTs = (backup.getType() == BackupType.FULL)
    ? backup.getStartTs()
    : backup.getIncrCommittedWalTs();
  try (BackupSystemTable backupSystemTable = new BackupSystemTable(conn)) {
    List<BulkLoad> bulkLoads = backupSystemTable.readBulkloadRows(List.of(sTableName));
    for (BulkLoad load : bulkLoads) {
      // Strict inequalities: a bulk load exactly at the backup time is covered by the
      // backup, and one exactly at endTime is outside the requested recovery window.
      if (lastBackupTs < load.getTimestamp() && load.getTimestamp() < endTime) {
        throw new IOException("Bulk load operation detected after last successful backup for "
          + "table: " + sTableName);
      }
    }
  }
}

/**
* Determines if the given backup is valid for PITR.
* <p>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.yetus.audience.InterfaceAudience;

Expand Down Expand Up @@ -58,14 +57,4 @@ public String getBackupId() {
public String getRootDir() {
return image.getRootDir();
}

/** Returns the backup type of the wrapped backup image (FULL or INCREMENTAL). */
@Override
public BackupType getType() {
return image.getType();
}

/**
 * Returns the incrCommittedWalTs of the wrapped backup image — presumably the committed
 * WAL timestamp recorded for an incremental backup; delegation only, no local state.
 */
@Override
public long getIncrCommittedWalTs() {
return image.getIncrCommittedWalTs();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.yetus.audience.InterfaceAudience;

/**
Expand Down Expand Up @@ -58,14 +57,4 @@ public String getBackupId() {
public String getRootDir() {
return info.getBackupRootDir();
}

/** Returns the backup type of the wrapped {@link BackupInfo} (FULL or INCREMENTAL). */
@Override
public BackupType getType() {
return info.getType();
}

/**
 * Returns the incrCommittedWalTs of the wrapped {@link BackupInfo} — presumably the
 * committed WAL timestamp recorded for an incremental backup; delegation only.
 */
@Override
public long getIncrCommittedWalTs() {
return info.getIncrCommittedWalTs();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -101,11 +101,6 @@ Builder withCompleteTime(long completeTime) {
return this;
}

/** Sets the incremental-backup committed WAL timestamp on the image being built. */
Builder withIncrCommittedWalTs(long incrCommittedWalTs) {
image.setIncrCommittedWalTs(incrCommittedWalTs);
return this;
}

BackupImage build() {
return image;
}
Expand All @@ -120,7 +115,6 @@ BackupImage build() {
private long completeTs;
private ArrayList<BackupImage> ancestors;
private Map<TableName, Map<String, Long>> incrTimeRanges;
private long incrCommittedWalTs;

static Builder newBuilder() {
return new Builder();
Expand All @@ -131,22 +125,20 @@ public BackupImage() {
}

private BackupImage(String backupId, BackupType type, String rootDir, List<TableName> tableList,
long startTs, long completeTs, long incrCommittedWalTs) {
long startTs, long completeTs) {
this.backupId = backupId;
this.type = type;
this.rootDir = rootDir;
this.tableList = tableList;
this.startTs = startTs;
this.completeTs = completeTs;
this.incrCommittedWalTs = incrCommittedWalTs;
}

static BackupImage fromProto(BackupProtos.BackupImage im) {
String backupId = im.getBackupId();
String rootDir = im.getBackupRootDir();
long startTs = im.getStartTs();
long completeTs = im.getCompleteTs();
long incrCommittedWalTs = im.getIncrCommittedWalTs();
List<HBaseProtos.TableName> tableListList = im.getTableListList();
List<TableName> tableList = new ArrayList<>();
for (HBaseProtos.TableName tn : tableListList) {
Expand All @@ -159,8 +151,7 @@ static BackupImage fromProto(BackupProtos.BackupImage im) {
? BackupType.FULL
: BackupType.INCREMENTAL;

BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs,
incrCommittedWalTs);
BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
for (BackupProtos.BackupImage img : ancestorList) {
image.addAncestor(fromProto(img));
}
Expand All @@ -179,7 +170,6 @@ BackupProtos.BackupImage toProto() {
builder.setBackupId(backupId);
builder.setCompleteTs(completeTs);
builder.setStartTs(startTs);
builder.setIncrCommittedWalTs(incrCommittedWalTs);
if (type == BackupType.FULL) {
builder.setBackupType(BackupProtos.BackupType.FULL);
} else {
Expand Down Expand Up @@ -297,14 +287,6 @@ public long getCompleteTs() {
return completeTs;
}

/** Returns the committed WAL timestamp recorded for this image (incrCommittedWalTs). */
public long getIncrCommittedWalTs() {
return incrCommittedWalTs;
}

/** Sets the committed WAL timestamp for this image (used by the builder). */
public void setIncrCommittedWalTs(long incrCommittedWalTs) {
this.incrCommittedWalTs = incrCommittedWalTs;
}

/** Sets the completion timestamp; private — only mutated internally/by the builder. */
private void setCompleteTs(long completeTs) {
this.completeTs = completeTs;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.yetus.audience.InterfaceAudience;

Expand Down Expand Up @@ -48,10 +47,4 @@ public interface PitrBackupMetadata {

/** Returns Root directory where the backup is stored */
String getRootDir();

/** Returns the backup type (FULL or INCREMENTAL) */
BackupType getType();

/** Returns the committed WAL timestamp for an incremental backup (incrCommittedWalTs) */
long getIncrCommittedWalTs();
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@
import static org.apache.hadoop.hbase.replication.regionserver.ReplicationMarkerChore.REPLICATION_MARKER_ENABLED_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
Expand All @@ -48,7 +47,6 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
Expand Down Expand Up @@ -190,54 +188,6 @@ public void testIncrementalBackupCopyingBulkloadTillIncrCommittedWalTs() throws
}
}

/**
 * Verifies that point-in-time restore fails when a bulk load happened after the last
 * successful backup: the bulk-loaded data has no backup image covering it, so the
 * restore tool must return a non-zero exit code.
 */
@Test
public void testPitrFailureDueToMissingBackupPostBulkload() throws Exception {
// Derive a unique table name from the test method name via the stack trace.
String methodName = Thread.currentThread().getStackTrace()[1].getMethodName();
TableName tableName1 = TableName.valueOf("table_" + methodName);
TEST_UTIL.createTable(tableName1, famName);
try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {

// The test starts with no data, and no bulk loaded rows.
int expectedRowCount = 0;
assertEquals(expectedRowCount, TEST_UTIL.countRows(tableName1));
assertTrue(systemTable.readBulkloadRows(List.of(tableName1)).isEmpty());

// Create continuous backup, bulk loads are now being tracked
String backup1 = backupTables(BackupType.FULL, List.of(tableName1), BACKUP_ROOT_DIR, true);
assertTrue(checkSucceeded(backup1));

// NOTE(review): the Table handles returned by getTable() here and below are never
// closed — consider try-with-resources; verify whether loadTable closes its argument.
loadTable(TEST_UTIL.getConnection().getTable(tableName1));
expectedRowCount = expectedRowCount + NB_ROWS_IN_BATCH;
performBulkLoad("bulkPreIncr", methodName, tableName1);
expectedRowCount += ROWS_IN_BULK_LOAD;
assertEquals(expectedRowCount, TEST_UTIL.countRows(tableName1));
assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size());

loadTable(TEST_UTIL.getConnection().getTable(tableName1));
// NOTE(review): fixed sleep, presumably to let replication/WAL progress advance past
// the writes before taking the incremental backup — potential flakiness source.
Thread.sleep(5000);

// Incremental backup
String backup2 =
backupTables(BackupType.INCREMENTAL, List.of(tableName1), BACKUP_ROOT_DIR, true);
assertTrue(checkSucceeded(backup2));
// The incremental backup consumed the tracked bulk load entry.
assertEquals(0, systemTable.readBulkloadRows(List.of(tableName1)).size());

// Bulk load AFTER the incremental backup — this is the load no backup covers.
performBulkLoad("bulkPostIncr", methodName, tableName1);
assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size());

loadTable(TEST_UTIL.getConnection().getTable(tableName1));
Thread.sleep(10000);
// Pick a restore point after the uncovered bulk load.
long restoreTs = BackupUtils.getReplicationCheckpoint(TEST_UTIL.getConnection());

// expect restore failure due to no backup post bulkPostIncr bulkload
TableName restoredTable = TableName.valueOf("restoredTable");
String[] args = PITRTestUtil.buildPITRArgs(new TableName[] { tableName1 },
new TableName[] { restoredTable }, restoreTs, null);
int ret = ToolRunner.run(conf1, new PointInTimeRestoreDriver(), args);
assertNotEquals("Restore should fail since there is one bulkload without any backup", 0, ret);
}
}

private void performBulkLoad(String keyPrefix, String testDir, TableName tableName)
throws IOException {
FileSystem fs = TEST_UTIL.getTestFileSystem();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,8 @@ private static void setUpBackups() throws Exception {
// Simulate a backup taken 20 days ago
EnvironmentEdgeManager
.injectEdge(() -> System.currentTimeMillis() - 20 * ONE_DAY_IN_MILLISECONDS);
// Insert initial data into table1
PITRTestUtil.loadRandomData(TEST_UTIL, table1, famName, 1000);
PITRTestUtil.loadRandomData(TEST_UTIL, table1, famName, 1000); // Insert initial data into
// table1

// Perform a full backup for table1 with continuous backup enabled
String[] args =
Expand Down
1 change: 0 additions & 1 deletion hbase-protocol-shaded/src/main/protobuf/Backup.proto
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,6 @@ message BackupImage {
optional uint64 complete_ts = 6;
repeated BackupImage ancestors = 7;
repeated TableServerTimestamp tst_map = 8;
optional uint64 incr_committed_wal_ts = 9;

}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1197,11 +1197,6 @@ public int run(String[] args) throws Exception {
/**
 * Command-line entry point: runs the bulk load tool via {@link ToolRunner} and exits the
 * JVM with the tool's return code.
 * @param args command-line arguments forwarded to the tool
 * @throws Exception if tool setup or execution fails
 */
public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
int ret = ToolRunner.run(conf, new BulkLoadHFilesTool(conf), args);
if (ret == 0) {
// Operator reminder on success only — presumably because bulk-loaded files are not
// captured by continuous backup until a new backup is taken; TODO confirm.
System.out.println("Bulk load completed successfully.");
System.out.println("IMPORTANT: Please take a backup of the table immediately if this table "
+ "is part of continuous backup");
}
System.exit(ret);
}

Expand Down