@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.debug;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import org.apache.commons.lang3.tuple.Pair;
@@ -68,6 +69,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertNotNull;

/**
* This class tests the `ozone debug ldb` CLI that reads from a RocksDB directory.
@@ -341,6 +343,50 @@ void testScanOfPipelinesWhenNoData() throws IOException {
assertEquals("", stderr.toString());
}

@Test
void testScanWithRecordsPerFile() throws IOException {
// Prepare dummy table
int recordsCount = 5;
prepareKeyTable(recordsCount);

String scanDir1 = tempDir.getAbsolutePath() + "/scandir1";
// Prepare scan args
int maxRecordsPerFile = 2;
List<String> completeScanArgs1 = new ArrayList<>(Arrays.asList(
"--db", dbStore.getDbLocation().getAbsolutePath(),
"scan",
"--column-family", KEY_TABLE, "--out", scanDir1 + File.separator + "keytable",
"--max-records-per-file", String.valueOf(maxRecordsPerFile)));
File tmpDir1 = new File(scanDir1);
tmpDir1.deleteOnExit();

int exitCode1 = cmd.execute(completeScanArgs1.toArray(new String[0]));
assertEquals(0, exitCode1);
assertTrue(tmpDir1.isDirectory());
File[] subFiles = tmpDir1.listFiles();
assertNotNull(subFiles);
assertEquals(Math.ceil(recordsCount / (maxRecordsPerFile * 1.0)), subFiles.length);
for (File subFile : subFiles) {
JsonNode jsonNode = MAPPER.readTree(subFile);
assertNotNull(jsonNode);
}

String scanDir2 = tempDir.getAbsolutePath() + "/scandir2";
// Same scan, combined with the '-l' (limit) parameter
List<String> completeScanArgs2 = new ArrayList<>(Arrays.asList(
"--db", dbStore.getDbLocation().getAbsolutePath(),
"scan",
"--column-family", KEY_TABLE, "--out", scanDir2 + File.separator + "keytable",
"--max-records-per-file", String.valueOf(maxRecordsPerFile), "-l", "2"));
File tmpDir2 = new File(scanDir2);
tmpDir2.deleteOnExit();

int exitCode2 = cmd.execute(completeScanArgs2.toArray(new String[0]));
assertEquals(0, exitCode2);
assertTrue(tmpDir2.isDirectory());
File[] subFiles2 = tmpDir2.listFiles();
assertNotNull(subFiles2);
assertEquals(1, subFiles2.length);
}

@Test
void testSchemaCommand() throws IOException {
// Prepare dummy table
@@ -389,22 +435,7 @@ private void prepareTable(String tableName, boolean schemaV3)

switch (tableName) {
case KEY_TABLE:
// Dummy om.db with only keyTable
dbStore = DBStoreBuilder.newBuilder(conf).setName("om.db")
.setPath(tempDir.toPath()).addTable(KEY_TABLE).build();

Table<byte[], byte[]> keyTable = dbStore.getTable(KEY_TABLE);
// Insert 5 keys
for (int i = 1; i <= 5; i++) {
String key = "key" + i;
OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1",
key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, HddsProtos.ReplicationFactor.ONE)).build();
keyTable.put(key.getBytes(UTF_8),
value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray());

// Populate map
dbMap.put(key, toMap(value));
}
prepareKeyTable(5);
break;

case BLOCK_DATA:
@@ -452,6 +483,29 @@ private void prepareTable(String tableName, boolean schemaV3)
}
}

/**
* Prepare the keytable for testing.
* @param recordsCount the number of keys to insert
*/
private void prepareKeyTable(int recordsCount) throws IOException {
if (recordsCount < 1) {
throw new IllegalArgumentException("recordsCount must be at least 1.");
}
// Dummy om.db with only keyTable
dbStore = DBStoreBuilder.newBuilder(conf).setName("om.db")
.setPath(tempDir.toPath()).addTable(KEY_TABLE).build();
Table<byte[], byte[]> keyTable = dbStore.getTable(KEY_TABLE);
for (int i = 1; i <= recordsCount; i++) {
String key = "key" + i;
OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1",
key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE,
HddsProtos.ReplicationFactor.ONE)).build();
keyTable.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray());
// Populate map
dbMap.put(key, toMap(value));
}
}

private static Map<String, Object> toMap(Object obj) throws IOException {
ObjectWriter objectWriter = DBScanner.JsonSerializationHelper.getWriter();
String json = objectWriter.writeValueAsString(obj);
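For context on the test above: with recordsCount = 5 and --max-records-per-file 2, the scan is expected to produce Math.ceil(5 / 2.0) = 3 JSON files, named by appending a numeric suffix to the --out path. Below is a small illustrative helper that reproduces this naming scheme (not part of the PR; the class and method names here are invented):

import java.util.ArrayList;
import java.util.List;

/** Illustrative helper, not part of the PR: expected layout of a split scan. */
final class SplitScanLayout {
  private SplitScanLayout() { }

  // Mirrors DBScanner's naming: "<out>.0", "<out>.1", ... when a per-file
  // cap is set, or just "<out>" when the cap is 0 (no splitting).
  static List<String> expectedFiles(String out, int records, int recordsPerFile) {
    List<String> names = new ArrayList<>();
    if (recordsPerFile <= 0) {
      names.add(out); // no cap: everything lands in one file
      return names;
    }
    int fileCount = (records + recordsPerFile - 1) / recordsPerFile; // integer ceil
    for (int i = 0; i < fileCount; i++) {
      names.add(out + "." + i);
    }
    return names;
  }
}

Here expectedFiles("keytable", 5, 2) yields [keytable.0, keytable.1, keytable.2], which matches the Math.ceil assertion on subFiles.length in testScanWithRecordsPerFile.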
@@ -54,6 +54,7 @@
import picocli.CommandLine;

import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.reflect.Field;
@@ -172,6 +173,14 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
defaultValue = "10")
private int threadCount;

@CommandLine.Option(names = {"--max-records-per-file"},
description = "The number of records to print per file.",
defaultValue = "0")
private long recordsPerFile;

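// fileSuffix numbers the rolled output files ("<out>.0", "<out>.1", ...);
// globalCount counts records written across all files so that the '-l'
// limit applies to the whole scan, not to each file.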
private int fileSuffix = 0;
private long globalCount = 0;

private static final String KEY_SEPARATOR_SCHEMA_V3 =
new OzoneConfiguration().getObject(DatanodeConfiguration.class)
.getContainerSchemaV3KeySeparator();
@@ -180,7 +189,8 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {

@Override
public Void call() throws Exception {

fileSuffix = 0;
globalCount = 0;
List<ColumnFamilyDescriptor> cfDescList =
RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath());
final List<ColumnFamilyHandle> cfHandleList = new ArrayList<>();
@@ -240,11 +250,29 @@ private boolean displayTable(ManagedRocksIterator iterator,
return displayTable(iterator, dbColumnFamilyDef, out(), schemaV3);
}

// Create the output file's parent directories if they do not exist
File file = new File(fileName);
File parentFile = file.getParentFile();
if (parentFile != null && !parentFile.exists() && !parentFile.mkdirs()) {
throw new IOException("Failed to create parent directory: " +
parentFile.getAbsolutePath());
}

// Write to file output, starting a new numbered file whenever the
// current one reaches recordsPerFile records
try (PrintWriter out = new PrintWriter(new BufferedWriter(
new PrintWriter(fileName, UTF_8.name())))) {
return displayTable(iterator, dbColumnFamilyDef, out, schemaV3);
while (iterator.get().isValid() && withinLimit(globalCount)) {
String fileNameTarget = recordsPerFile > 0 ? fileName + "." + fileSuffix++ :
fileName;
try (PrintWriter out = new PrintWriter(new BufferedWriter(
new PrintWriter(fileNameTarget, UTF_8.name())))) {
if (!displayTable(iterator, dbColumnFamilyDef, out, schemaV3)) {
return false;
}
}
}
return true;
}

private boolean displayTable(ManagedRocksIterator iterator,
@@ -314,7 +342,7 @@ private void processRecords(ManagedRocksIterator iterator,
}
}

while (withinLimit(count) && iterator.get().isValid() && !exception && !reachedEnd) {
while (withinLimit(globalCount) && iterator.get().isValid() && !exception && !reachedEnd) {
// if invalid endKey is given, it is ignored
if (null != endKey && Arrays.equals(iterator.get().key(), getValueObject(dbColumnFamilyDef, endKey))) {
reachedEnd = true;
@@ -326,6 +354,7 @@ private void processRecords(ManagedRocksIterator iterator,
// the record passes the filter
batch.add(new ByteArrayKeyValue(
iterator.get().key(), iterator.get().value()));
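// globalCount spans every output file (it backs withinLimit for '-l');
// count only tracks records handled in this processRecords call.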
globalCount++;
count++;
if (batch.size() >= batchSize) {
while (logWriter.getInflightLogCount() > threadCount * 10L
@@ -343,6 +372,9 @@ private void processRecords(ManagedRocksIterator iterator,
}
}
iterator.get().next();
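// The current file has reached --max-records-per-file; stop here so
// displayTable can roll over to the next numbered file.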
if ((recordsPerFile > 0) && (count >= recordsPerFile)) {
break;
}
}
if (!batch.isEmpty()) {
Future<Void> future = threadPool.submit(new Task(dbColumnFamilyDef,
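Taken together, the DBScanner changes form a two-level loop: an outer loop in displayTable opens one numbered output file per pass, and the inner loop in processRecords stops either at the global '-l' limit or when the current file is full. A simplified, self-contained sketch of that control flow (plain Java; the RocksDB iterator and JSON writer are reduced to a list and System.out, and all names are illustrative):

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

/** Simplified sketch of the file-rolling control flow; not the real DBScanner. */
public final class FileRollingSketch {

  static void scan(List<String> records, long recordsPerFile, long limit) {
    Iterator<String> it = records.iterator();
    long globalCount = 0; // counts records across all files; bounds the '-l' limit
    int fileSuffix = 0;   // numbers the output files when splitting is enabled
    // Outer loop: one pass per output file (displayTable in the PR).
    while (it.hasNext() && (limit < 0 || globalCount < limit)) {
      String target = recordsPerFile > 0 ? "out." + fileSuffix++ : "out";
      long count = 0;     // counts records in the current file only
      // Inner loop: fill the current file (processRecords in the PR).
      while (it.hasNext() && (limit < 0 || globalCount < limit)) {
        System.out.println(target + " <- " + it.next());
        globalCount++;
        count++;
        if (recordsPerFile > 0 && count >= recordsPerFile) {
          break; // current file is full; roll over to the next suffix
        }
      }
    }
  }

  public static void main(String[] args) {
    // 5 records, 2 per file, no limit: expect out.0, out.1, out.2.
    scan(Arrays.asList("key1", "key2", "key3", "key4", "key5"), 2, -1);
  }
}

Note the behavioral shift the diff makes by switching withinLimit(count) to withinLimit(globalCount): the '-l' limit now caps the whole scan rather than each file, which is exactly what the '-l 2' half of testScanWithRecordsPerFile asserts (a single output file despite splitting being enabled).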