ozone.om.snapshot.diff.max.allowed.keys.changed.per.job = 10000000
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java
new file mode 100644
index 000000000000..46fbeb412a84
--- /dev/null
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import org.rocksdb.util.Environment;
+
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+/**
+ * Class that writes the RocksDB JNI library file name to a file.
+ * The build reads it back when compiling the native ozone_rocksdb_tools library.
+ */
+public final class JniLibNamePropertyWriter {
+
+ private JniLibNamePropertyWriter() {
+ }
+
+ public static void main(String[] args) {
+ String filePath = args[0];
+ try (Writer writer = new OutputStreamWriter(
+ Files.newOutputStream(Paths.get(filePath)), StandardCharsets.UTF_8)) {
+ writer.write("rocksdbLibName=" +
+ Environment.getJniLibraryFileName("rocksdb"));
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+}
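Note (illustrative, not part of the patch): the writer emits a single key=value line whose value is platform dependent; on 64-bit Linux, RocksDB's Environment helper typically resolves it to a name such as librocksdbjni-linux64.so. A hedged sketch of the invocation the exec-maven-plugin execution below performs, with an assumed output path:

    // Hedged sketch; the output path is an assumption mirroring the pom wiring below.
    JniLibNamePropertyWriter.main(new String[] {"target/propertyFile.txt"});
    // target/propertyFile.txt then contains one line, for example:
    // rocksdbLibName=librocksdbjni-linux64.so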
diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml
index 1b29bfcbd6af..6e92f57b6c18 100644
--- a/hadoop-hdds/rocks-native/pom.xml
+++ b/hadoop-hdds/rocks-native/pom.xml
@@ -57,13 +57,16 @@
mockito-inline
test
+
+ org.assertj
+ assertj-core
+
false
8
8
- https://zlib.net/fossils/zlib-${zlib.version}.tar.gz
@@ -120,79 +123,80 @@
- com.googlecode.maven-download-plugin
- download-maven-plugin
+ org.codehaus.mojo
+ exec-maven-plugin
- rocksdb source download
- generate-sources
+ set-property
+ initialize
- wget
+ java
- https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz
- rocksdb-v${rocksdb.version}.tar.gz
- ${project.build.directory}/rocksdb
-
-
-
- zlib source download
- generate-sources
-
- wget
-
-
- ${zlib.url}
- zlib-${zlib.version}.tar.gz
- ${project.build.directory}/zlib
-
-
-
- bzip2 source download
- generate-sources
-
- wget
-
-
- https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz
- bzip2-v${bzip2.version}.tar.gz
- ${project.build.directory}/bzip2
+ org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter
+
+ ${project.build.directory}/propertyFile.txt
+
+
+
+
+ org.codehaus.mojo
+ properties-maven-plugin
+
- lz4 source download
- generate-sources
+ read-property-from-file
+ initialize
- wget
+ read-project-properties
- https://github.com/lz4/lz4/archive/refs/tags/v${lz4.version}.tar.gz
- lz4-v${lz4.version}.tar.gz
- ${project.build.directory}/lz4
+
+ ${project.build.directory}/propertyFile.txt
+
+
+
+
+ org.apache.maven.plugins
+ maven-dependency-plugin
+
- snappy source download
- generate-sources
+ unpack-dependency
+ initialize
- wget
+ unpack
- https://github.com/google/snappy/archive/refs/tags/${snappy.version}.tar.gz
- snappy-v${snappy.version}.tar.gz
- ${project.build.directory}/snappy
+
+
+ org.rocksdb
+ rocksdbjni
+ jar
+ false
+ ${project.build.directory}/rocksdbjni
+
+
+
+
+
+ com.googlecode.maven-download-plugin
+ download-maven-plugin
+
- zstd source download
+ rocksdb source download
generate-sources
wget
- https://github.com/facebook/zstd/archive/refs/tags/v${zstd.version}.tar.gz
- zstd-v${zstd.version}.tar.gz
- ${project.build.directory}/zstd
+ https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz
+ rocksdb-v${rocksdb.version}.tar.gz
+ ${project.build.directory}/rocksdb
@@ -226,88 +230,6 @@
-
-
-
-
-
-
-
-
- run
-
-
-
- build-zlib
- process-sources
-
-
-
-
-
-
-
-
-
-
-
- run
-
-
-
- build-bzip2
- process-sources
-
-
-
-
-
-
-
-
- run
-
-
-
- build-lz4
- process-sources
-
-
-
-
-
-
-
-
- run
-
-
-
- build-zstd
- process-sources
-
-
-
-
-
-
-
-
- run
-
-
-
- build-snappy
- process-sources
-
-
-
-
-
-
-
-
-
@@ -326,11 +248,10 @@
-
-
-
+
+
-
+
@@ -353,14 +274,11 @@
-
-
-
-
-
-
+
+
-
+
@@ -430,8 +348,8 @@
${env.JAVA_HOME}/bin/javah
- org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool
- org.apache.hadoop.hdds.utils.db.managed.PipeInputStream
+ org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader
+ org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator
${project.build.directory}/native/javah
@@ -486,8 +404,8 @@
${project.build.outputDirectory}:${project.build.directory}/dependency/*
-h
${project.build.directory}/native/javah
- ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java
- ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java
+ ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
+ ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java
diff --git a/hadoop-hdds/rocks-native/src/CMakeLists.txt b/hadoop-hdds/rocks-native/src/CMakeLists.txt
index 051660777493..4639e2a8c927 100644
--- a/hadoop-hdds/rocks-native/src/CMakeLists.txt
+++ b/hadoop-hdds/rocks-native/src/CMakeLists.txt
@@ -21,6 +21,7 @@
#
cmake_minimum_required(VERSION 2.8)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
project(ozone_native)
@@ -36,43 +37,18 @@ endif()
include_directories(${GENERATED_JAVAH})
if(${SST_DUMP_INCLUDE})
include_directories(${ROCKSDB_HEADERS})
- set(SOURCE_FILES ${NATIVE_DIR}/SSTDumpTool.cpp ${NATIVE_DIR}/PipeInputStream.cpp ${NATIVE_DIR}/Pipe.h ${NATIVE_DIR}/Pipe.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h)
- ADD_LIBRARY(rocksdb STATIC IMPORTED)
+ set(SOURCE_FILES ${NATIVE_DIR}/ManagedRawSSTFileReader.cpp ${NATIVE_DIR}/ManagedRawSSTFileIterator.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h)
+ ADD_LIBRARY(rocksdb SHARED IMPORTED)
set_target_properties(
rocksdb
PROPERTIES
- IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb.a)
+ IMPORTED_LOCATION ${ROCKSDB_LIB})
ADD_LIBRARY(rocks_tools STATIC IMPORTED)
set_target_properties(
rocks_tools
PROPERTIES
- IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb_tools.a)
- ADD_LIBRARY(bz2 STATIC IMPORTED)
- set_target_properties(
- bz2
- PROPERTIES
- IMPORTED_LOCATION ${BZIP2_LIB}/libbz2.a)
- ADD_LIBRARY(zlib STATIC IMPORTED)
- set_target_properties(
- zlib
- PROPERTIES
- IMPORTED_LOCATION ${ZLIB_LIB}/libz.a)
- ADD_LIBRARY(lz4 STATIC IMPORTED)
- set_target_properties(
- lz4
- PROPERTIES
- IMPORTED_LOCATION ${LZ4_LIB}/liblz4.a)
- ADD_LIBRARY(snappy STATIC IMPORTED)
- set_target_properties(
- snappy
- PROPERTIES
- IMPORTED_LOCATION ${SNAPPY_LIB}/libsnappy.a)
- ADD_LIBRARY(zstd STATIC IMPORTED)
- set_target_properties(
- zstd
- PROPERTIES
- IMPORTED_LOCATION ${ZSTD_LIB}/libzstd.a)
- set(linked_libraries ${linked_libraries} bz2 zlib rocks_tools rocksdb lz4 snappy zstd)
+ IMPORTED_LOCATION ${ROCKSDB_TOOLS_LIB}/librocksdb_tools.a)
+ set(linked_libraries ${linked_libraries} rocks_tools rocksdb)
endif()
add_library(ozone_rocksdb_tools SHARED ${SOURCE_FILES})
target_link_libraries(ozone_rocksdb_tools ${linked_libraries})
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java
index d3121144d37a..8937f0803a18 100644
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java
@@ -26,6 +26,5 @@ public final class NativeConstants {
private NativeConstants() {
}
- public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME
- = "ozone_rocksdb_tools";
+ public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME = "ozone_rocksdb_tools";
}
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java
new file mode 100644
index 000000000000..02125951c1fe
--- /dev/null
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import com.google.common.primitives.UnsignedLong;
+import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.util.ClosableIterator;
+
+import java.util.Arrays;
+import java.util.NoSuchElementException;
+import java.util.function.Function;
+
+/**
+ * Iterator for the raw SST file reader that returns all entries, including tombstones.
+ */
+public class ManagedRawSSTFileIterator<T> implements ClosableIterator<T> {
+ // Native address of pointer to the object.
+ private final long nativeHandle;
+ private final Function<KeyValue, T> transformer;
+
+ ManagedRawSSTFileIterator(long nativeHandle, Function<KeyValue, T> transformer) {
+ this.nativeHandle = nativeHandle;
+ this.transformer = transformer;
+ }
+
+ private native boolean hasNext(long handle);
+ private native void next(long handle);
+ private native byte[] getKey(long handle);
+ private native byte[] getValue(long handle);
+ private native long getSequenceNumber(long handle);
+ private native int getType(long handle);
+
+ @Override
+ public boolean hasNext() {
+ return this.hasNext(nativeHandle);
+ }
+
+ @Override
+ public T next() {
+ if (!hasNext()) {
+ throw new NoSuchElementException();
+ }
+
+ KeyValue keyValue = new KeyValue(this.getKey(nativeHandle),
+ UnsignedLong.fromLongBits(this.getSequenceNumber(this.nativeHandle)),
+ this.getType(nativeHandle),
+ this.getValue(nativeHandle));
+ this.next(nativeHandle);
+ return this.transformer.apply(keyValue);
+ }
+
+ private native void closeInternal(long handle);
+
+ @Override
+ public void close() {
+ this.closeInternal(this.nativeHandle);
+ }
+
+ /**
+ * Class containing Parsed KeyValue Record from RawSstReader output.
+ */
+ public static final class KeyValue {
+
+ private final byte[] key;
+ private final UnsignedLong sequence;
+ private final Integer type;
+ private final byte[] value;
+
+ private KeyValue(byte[] key, UnsignedLong sequence, Integer type,
+ byte[] value) {
+ this.key = key;
+ this.sequence = sequence;
+ this.type = type;
+ this.value = value;
+ }
+
+ public byte[] getKey() {
+ return Arrays.copyOf(key, key.length);
+ }
+
+ public UnsignedLong getSequence() {
+ return sequence;
+ }
+
+ public Integer getType() {
+ return type;
+ }
+
+ public byte[] getValue() {
+ return Arrays.copyOf(value, value.length);
+ }
+
+ @Override
+ public String toString() {
+ return "KeyValue{" +
+ "key=" + StringUtils.bytes2String(key) +
+ ", sequence=" + sequence +
+ ", type=" + type +
+ ", value=" + StringUtils.bytes2String(value) +
+ '}';
+ }
+ }
+}
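Note (illustrative, not part of the patch): instances of this iterator are created by ManagedRawSSTFileReader#newIterator, which supplies the transformer. A hedged sketch of a transformer that renders each entry as text; treating type 0 as a tombstone is an assumption about RocksDB's internal value types:

    // Hedged sketch; the tombstone check (type == 0) is an assumption, not part of the change.
    Function<ManagedRawSSTFileIterator.KeyValue, String> toText = kv ->
        StringUtils.bytes2String(kv.getKey())
            + (kv.getType() == 0
               ? " [tombstone]"
               : " -> " + StringUtils.bytes2String(kv.getValue()));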
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
new file mode 100644
index 000000000000..7c8783b43948
--- /dev/null
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import org.apache.hadoop.hdds.utils.NativeLibraryLoader;
+import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.util.function.Function;
+
+import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
+
+/**
+ * JNI for RocksDB RawSSTFileReader.
+ */
+public class ManagedRawSSTFileReader<T> implements Closeable {
+
+ public static boolean loadLibrary() throws NativeLibraryNotLoadedException {
+ ManagedRocksObjectUtils.loadRocksDBLibrary();
+ if (!NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) {
+ throw new NativeLibraryNotLoadedException(ROCKS_TOOLS_NATIVE_LIBRARY_NAME);
+ }
+ return true;
+ }
+
+ private final String fileName;
+ // Native address of pointer to the object.
+ private final long nativeHandle;
+ private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class);
+
+ public ManagedRawSSTFileReader(final ManagedOptions options, final String fileName, final int readAheadSize) {
+ this.fileName = fileName;
+ this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize);
+ }
+
+ public ManagedRawSSTFileIterator<T> newIterator(
+ Function<ManagedRawSSTFileIterator.KeyValue, T> transformerFunction,
+ ManagedSlice fromSlice, ManagedSlice toSlice) {
+ long fromNativeHandle = fromSlice == null ? 0 : fromSlice.getNativeHandle();
+ long toNativeHandle = toSlice == null ? 0 : toSlice.getNativeHandle();
+ LOG.info("Iterating SST file: {} with native lib. " +
+ "LowerBound: {}, UpperBound: {}", fileName, fromSlice, toSlice);
+ return new ManagedRawSSTFileIterator<>(
+ newIterator(this.nativeHandle, fromSlice != null,
+ fromNativeHandle, toSlice != null, toNativeHandle),
+ transformerFunction);
+ }
+
+ private native long newRawSSTFileReader(long optionsHandle, String filePath, int readSize);
+
+
+ private native long newIterator(long handle, boolean hasFrom, long fromSliceHandle, boolean hasTo,
+ long toSliceHandle);
+
+ private native void disposeInternal(long handle);
+
+ @Override
+ public void close() {
+ disposeInternal(nativeHandle);
+ }
+}
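Note (illustrative, not part of the patch): a hedged end-to-end sketch of how a caller might use the new reader and iterator; the SST path, read-ahead size, and unbounded (null) slices are arbitrary for illustration:

    // Hedged sketch; assumes the native ozone_rocksdb_tools library is available on this platform.
    static void printKeys(String sstPath) throws NativeLibraryNotLoadedException {
      ManagedRawSSTFileReader.loadLibrary();
      try (ManagedOptions options = new ManagedOptions();
           ManagedRawSSTFileReader<String> reader =
               new ManagedRawSSTFileReader<>(options, sstPath, 2 * 1024 * 1024);
           ManagedRawSSTFileIterator<String> keys =
               reader.newIterator(kv -> StringUtils.bytes2String(kv.getKey()), null, null)) {
        while (keys.hasNext()) {
          System.out.println(keys.next());   // one user key per line
        }
      }
    }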
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java
deleted file mode 100644
index d8844eaacbcd..000000000000
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.utils.db.managed;
-
-import com.google.common.collect.Maps;
-import com.google.common.primitives.UnsignedLong;
-import org.apache.hadoop.hdds.StringUtils;
-import org.apache.hadoop.util.ClosableIterator;
-import org.eclipse.jetty.io.RuntimeIOException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UncheckedIOException;
-import java.nio.ByteBuffer;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Arrays;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
-
-/**
- * Iterator to Parse output of RocksDBSSTDumpTool.
- */
-public abstract class ManagedSSTDumpIterator<T> implements ClosableIterator<T> {
-
- private static final Logger LOG =
- LoggerFactory.getLogger(ManagedSSTDumpIterator.class);
- // Since we don't have any restriction on the key & value, we are prepending
- // the length of the pattern in the sst dump tool output.
- // The first token in the pattern is the key.
- // The second tells the sequence number of the key.
- // The third token gives the type of key in the sst file.
- // The fourth token
- private InputStream processOutput;
- private Optional<KeyValue> currentKey;
- private byte[] intBuffer;
- private Optional<KeyValue> nextKey;
-
- private ManagedSSTDumpTool.SSTDumpToolTask sstDumpToolTask;
- private AtomicBoolean open;
- private StackTraceElement[] stackTrace;
-
- public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool,
- String sstFilePath, ManagedOptions options)
- throws IOException {
- this(sstDumpTool, sstFilePath, options, null, null);
- }
-
- public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool,
- String sstFilePath, ManagedOptions options,
- ManagedSlice lowerKeyBound,
- ManagedSlice upperKeyBound)
- throws IOException {
- File sstFile = new File(sstFilePath);
- if (!sstFile.exists()) {
- throw new IOException(String.format("File in path : %s doesn't exist",
- sstFile.getAbsolutePath()));
- }
- if (!sstFile.isFile()) {
- throw new IOException(String.format("Path given: %s is not a file",
- sstFile.getAbsolutePath()));
- }
- init(sstDumpTool, sstFile, options, lowerKeyBound, upperKeyBound);
- this.stackTrace = Thread.currentThread().getStackTrace();
- }
-
- /**
- * Parses next occuring number in the stream.
- *
- * @return Optional of the integer empty if no integer exists
- */
- private Optional<Integer> getNextNumberInStream() throws IOException {
- int n = processOutput.read(intBuffer, 0, 4);
- if (n == 4) {
- return Optional.of(ByteBuffer.wrap(intBuffer).getInt());
- } else if (n >= 0) {
- throw new IllegalStateException(String.format("Integer expects " +
- "4 bytes to be read from the stream, but read only %d bytes", n));
- }
- return Optional.empty();
- }
-
- private Optional<byte[]> getNextByteArray() throws IOException {
- Optional<Integer> size = getNextNumberInStream();
- if (size.isPresent()) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Allocating byte array, size: {}", size.get());
- }
- byte[] b = new byte[size.get()];
- int n = processOutput.read(b);
- if (n >= 0 && n != size.get()) {
- throw new IllegalStateException(String.format("Integer expects " +
- "4 bytes to be read from the stream, but read only %d bytes", n));
- }
- return Optional.of(b);
- }
- return Optional.empty();
- }
-
- private Optional<UnsignedLong> getNextUnsignedLong() throws IOException {
- long val = 0;
- for (int i = 0; i < 8; i++) {
- val = val << 8;
- int nextByte = processOutput.read();
- if (nextByte < 0) {
- if (i == 0) {
- return Optional.empty();
- }
- throw new IllegalStateException(String.format("Long expects " +
- "8 bytes to be read from the stream, but read only %d bytes", i));
- }
- val += nextByte;
- }
- return Optional.of(UnsignedLong.fromLongBits(val));
- }
-
- private void init(ManagedSSTDumpTool sstDumpTool, File sstFile,
- ManagedOptions options, ManagedSlice lowerKeyBound,
- ManagedSlice upperKeyBound) {
- Map<String, String> argMap = Maps.newHashMap();
- argMap.put("file", sstFile.getAbsolutePath());
- argMap.put("silent", null);
- argMap.put("command", "scan");
- // strings containing '\0' do not have the same value when encode UTF-8 on
- // java which is 0. But in jni the utf-8 encoded value for '\0'
- // becomes -64 -128. Thus the value becomes different.
- // In order to support this, changes have been made on the rocks-tools
- // to pass the address of the ManagedSlice and the jni can use the object
- // of slice directly from there.
- if (Objects.nonNull(lowerKeyBound)) {
- argMap.put("from", String.valueOf(lowerKeyBound.getNativeHandle()));
- }
- if (Objects.nonNull(upperKeyBound)) {
- argMap.put("to", String.valueOf(upperKeyBound.getNativeHandle()));
- }
- this.sstDumpToolTask = sstDumpTool.run(argMap, options);
- processOutput = sstDumpToolTask.getPipedOutput();
- intBuffer = new byte[4];
- open = new AtomicBoolean(true);
- currentKey = Optional.empty();
- nextKey = Optional.empty();
- next();
- }
-
- /**
- * Throws Runtime exception in the case iterator is closed or
- * the native Dumptool exited with non zero exit value.
- */
- private void checkSanityOfProcess() {
- if (!this.open.get()) {
- throw new RuntimeException("Iterator has been closed");
- }
- if (sstDumpToolTask.getFuture().isDone() &&
- sstDumpToolTask.exitValue() != 0) {
- throw new RuntimeException("Process Terminated with non zero " +
- String.format("exit value %d", sstDumpToolTask.exitValue()));
- }
- }
-
- /**
- * Checks the status of the process & sees if there is another record.
- *
- * @return True if next exists & false otherwise
- * Throws Runtime Exception in case of SST File read failure
- */
-
- @Override
- public boolean hasNext() {
- checkSanityOfProcess();
- return nextKey.isPresent();
- }
-
- /**
- * Transforms Key to a certain value.
- *
- * @param value
- * @return transformed Value
- */
- protected abstract T getTransformedValue(Optional<KeyValue> value);
-
- /**
- * Returns the next record from SSTDumpTool.
- *
- * @return next Key
- * Throws Runtime Exception incase of failure.
- */
- @Override
- public T next() {
- checkSanityOfProcess();
- currentKey = nextKey;
- nextKey = Optional.empty();
- try {
- Optional<byte[]> key = getNextByteArray();
- if (!key.isPresent()) {
- return getTransformedValue(currentKey);
- }
- UnsignedLong sequenceNumber = getNextUnsignedLong()
- .orElseThrow(() -> new IllegalStateException(
- String.format("Error while trying to read sequence number" +
- " for key %s", StringUtils.bytes2String(key.get()))));
-
- Integer type = getNextNumberInStream()
- .orElseThrow(() -> new IllegalStateException(
- String.format("Error while trying to read sequence number for " +
- "key %s with sequence number %s",
- StringUtils.bytes2String(key.get()),
- sequenceNumber.toString())));
- byte[] val = getNextByteArray().orElseThrow(() ->
- new IllegalStateException(
- String.format("Error while trying to read sequence number for " +
- "key %s with sequence number %s of type %d",
- StringUtils.bytes2String(key.get()),
- sequenceNumber.toString(), type)));
- nextKey = Optional.of(new KeyValue(key.get(), sequenceNumber, type, val));
- } catch (IOException e) {
- // TODO [SNAPSHOT] Throw custom snapshot exception
- throw new RuntimeIOException(e);
- }
- return getTransformedValue(currentKey);
- }
-
- @Override
- public synchronized void close() throws UncheckedIOException {
- if (this.sstDumpToolTask != null) {
- if (!this.sstDumpToolTask.getFuture().isDone()) {
- this.sstDumpToolTask.getFuture().cancel(true);
- }
- try {
- this.processOutput.close();
- } catch (IOException e) {
- throw new UncheckedIOException(e);
- }
- }
- open.compareAndSet(true, false);
- }
-
- @Override
- protected void finalize() throws Throwable {
- if (open.get()) {
- LOG.warn("{} is not closed properly." +
- " StackTrace for unclosed instance: {}",
- this.getClass().getName(),
- Arrays.stream(stackTrace)
- .map(StackTraceElement::toString).collect(
- Collectors.joining("\n")));
- }
- this.close();
- super.finalize();
- }
-
- /**
- * Class containing Parsed KeyValue Record from Sst Dumptool output.
- */
- public static final class KeyValue {
-
- private final byte[] key;
- private final UnsignedLong sequence;
- private final Integer type;
- private final byte[] value;
-
- private KeyValue(byte[] key, UnsignedLong sequence, Integer type,
- byte[] value) {
- this.key = key;
- this.sequence = sequence;
- this.type = type;
- this.value = value;
- }
-
- public byte[] getKey() {
- return key;
- }
-
- public UnsignedLong getSequence() {
- return sequence;
- }
-
- public Integer getType() {
- return type;
- }
-
- public byte[] getValue() {
- return value;
- }
-
- @Override
- public String toString() {
- return "KeyValue{" +
- "key=" + StringUtils.bytes2String(key) +
- ", sequence=" + sequence +
- ", type=" + type +
- ", value=" + StringUtils.bytes2String(value) +
- '}';
- }
- }
-}
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java
deleted file mode 100644
index 5d965d7398e0..000000000000
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils.db.managed;
-
-import org.apache.hadoop.hdds.utils.NativeLibraryLoader;
-import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
-
-import java.io.InputStream;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
-
-/**
- * JNI for RocksDB SSTDumpTool. Pipes the output to an output stream
- */
-public class ManagedSSTDumpTool {
-
- private int bufferCapacity;
- private ExecutorService executorService;
-
- public ManagedSSTDumpTool(ExecutorService executorService,
- int bufferCapacity)
- throws NativeLibraryNotLoadedException {
- if (!NativeLibraryLoader.getInstance()
- .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) {
- throw new NativeLibraryNotLoadedException(
- ROCKS_TOOLS_NATIVE_LIBRARY_NAME);
- }
- this.bufferCapacity = bufferCapacity;
- this.executorService = executorService;
- }
-
- public SSTDumpToolTask run(String[] args, ManagedOptions options) {
- PipeInputStream pipeInputStream = new PipeInputStream(bufferCapacity);
- return new SSTDumpToolTask(this.executorService.submit(() ->
- this.runInternal(args, options.getNativeHandle(),
- pipeInputStream.getNativeHandle())), pipeInputStream);
- }
-
- public SSTDumpToolTask run(Map<String, String> args, ManagedOptions options) {
- return this.run(args.entrySet().stream().map(e -> "--"
- + (e.getValue() == null || e.getValue().isEmpty() ? e.getKey() :
- e.getKey() + "=" + e.getValue())).toArray(String[]::new), options);
- }
-
- private native int runInternal(String[] args, long optionsHandle,
- long pipeHandle);
-
- /**
- * Class holding piped output of SST Dumptool & future of command.
- */
- static class SSTDumpToolTask {
- private Future<Integer> future;
- private InputStream pipedOutput;
-
- SSTDumpToolTask(Future<Integer> future, InputStream pipedOutput) {
- this.future = future;
- this.pipedOutput = pipedOutput;
- }
-
- public Future<Integer> getFuture() {
- return future;
- }
-
- public InputStream getPipedOutput() {
- return pipedOutput;
- }
-
- public int exitValue() {
- if (this.future.isDone()) {
- try {
- return future.get();
- } catch (InterruptedException | ExecutionException e) {
- return 1;
- }
- }
- return 0;
- }
- }
-}
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java
deleted file mode 100644
index df4f613f98e2..000000000000
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils.db.managed;
-
-import java.io.InputStream;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * JNI for reading data from pipe.
- */
-public class PipeInputStream extends InputStream {
-
- private byte[] byteBuffer;
- private long nativeHandle;
- private int numberOfBytesLeftToRead;
- private int index = 0;
- private int capacity;
-
- private AtomicBoolean cleanup;
-
- PipeInputStream(int capacity) {
- this.byteBuffer = new byte[capacity];
- this.numberOfBytesLeftToRead = 0;
- this.capacity = capacity;
- this.nativeHandle = newPipe();
- this.cleanup = new AtomicBoolean(false);
- }
-
- long getNativeHandle() {
- return nativeHandle;
- }
-
- @Override
- public int read() {
- if (numberOfBytesLeftToRead < 0) {
- this.close();
- return -1;
- }
- while (numberOfBytesLeftToRead == 0) {
- numberOfBytesLeftToRead = readInternal(byteBuffer, capacity,
- nativeHandle);
- index = 0;
- if (numberOfBytesLeftToRead != 0) {
- return read();
- }
- }
- numberOfBytesLeftToRead--;
- int ret = byteBuffer[index] & 0xFF;
- index += 1;
- return ret;
- }
-
- private native long newPipe();
-
- private native int readInternal(byte[] buff, int numberOfBytes,
- long pipeHandle);
-
- private native void closeInternal(long pipeHandle);
-
- @Override
- public void close() {
- if (this.cleanup.compareAndSet(false, true)) {
- closeInternal(this.nativeHandle);
- }
- }
-
- @Override
- protected void finalize() throws Throwable {
- close();
- super.finalize();
- }
-}
diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp
new file mode 100644
index 000000000000..1cf222528379
--- /dev/null
+++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/raw_iterator.h"
+#include
+#include "cplusplus_to_java_convert.h"
+#include
+
+jboolean Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj,
+ jlong native_handle) {
+ return static_cast<jboolean>(reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator *>(native_handle)->Valid());
+}
+
+void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj,
+ jlong native_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator *>(native_handle)->Next();
+}
+
+jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getKey(JNIEnv *env,
+ jobject obj,
+ jlong native_handle) {
+ ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator *>(native_handle)->key();
+ jbyteArray jkey = env->NewByteArray(static_cast<jsize>(slice.size()));
+ if (jkey == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetByteArrayRegion(
+ jkey, 0, static_cast<jsize>(slice.size()),
+ const_cast<jbyte *>(reinterpret_cast<const jbyte *>(slice.data())));
+ return jkey;
+}
+
+
+jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getValue(JNIEnv *env,
+ jobject obj,
+ jlong native_handle) {
+ ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator *>(native_handle)->value();
+ jbyteArray jkey = env->NewByteArray(static_cast<jsize>(slice.size()));
+ if (jkey == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetByteArrayRegion(
+ jkey, 0, static_cast<jsize>(slice.size()),
+ const_cast<jbyte *>(reinterpret_cast<const jbyte *>(slice.data())));
+ return jkey;
+}
+
+jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env,
+ jobject obj,
+ jlong native_handle) {
+ uint64_t sequence_number =
+ reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator *>(native_handle)->sequenceNumber();
+ jlong result;
+ std::memcpy(&result, &sequence_number, sizeof(jlong));
+ return result;
+}
+
+
+jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getType(JNIEnv *env,
+ jobject obj,
+ jlong native_handle) {
+ uint32_t type = reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator *>(native_handle)->type();
+ return static_cast(type);
+}
+
+
+void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env,
+ jobject obj,
+ jlong native_handle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator *>(native_handle);
+}
diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp
new file mode 100644
index 000000000000..f3b8dc02639d
--- /dev/null
+++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader.h"
+#include "rocksdb/options.h"
+#include "rocksdb/raw_sst_file_reader.h"
+#include "rocksdb/raw_iterator.h"
+#include
+#include "cplusplus_to_java_convert.h"
+#include
+
+jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj,
+ jlong options_handle,
+ jstring jfilename,
+ jint readahead_size) {
+ ROCKSDB_NAMESPACE::Options *options = reinterpret_cast<ROCKSDB_NAMESPACE::Options *>(options_handle);
+ const char *file_path = env->GetStringUTFChars(jfilename, nullptr);
+ size_t read_ahead_size_value = static_cast<size_t>(readahead_size);
+ ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader =
+ new ROCKSDB_NAMESPACE::RawSstFileReader(*options, file_path, read_ahead_size_value, true, true);
+ env->ReleaseStringUTFChars(jfilename, file_path);
+ return GET_CPLUSPLUS_POINTER(raw_sst_file_reader);
+}
+
+jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj,
+ jlong native_handle,
+ jboolean jhas_from,
+ jlong from_slice_handle,
+ jboolean jhas_to,
+ jlong to_slice_handle) {
+ ROCKSDB_NAMESPACE::Slice* from_slice = nullptr;
+ ROCKSDB_NAMESPACE::Slice* to_slice = nullptr;
+ ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader =
+ reinterpret_cast<ROCKSDB_NAMESPACE::RawSstFileReader *>(native_handle);
+ bool has_from = static_cast<bool>(jhas_from);
+ bool has_to = static_cast<bool>(jhas_to);
+ if (has_from) {
+ from_slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(from_slice_handle);
+ }
+ if (has_to) {
+ to_slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(to_slice_handle);
+ }
+ ROCKSDB_NAMESPACE::RawIterator* iterator = raw_sst_file_reader->newIterator(has_from, from_slice, has_to, to_slice);
+ return GET_CPLUSPLUS_POINTER(iterator);
+}
+
+void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj,
+ jlong native_handle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::RawSstFileReader *>(native_handle);
+}
diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp b/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp
deleted file mode 100644
index f1dd54438700..000000000000
--- a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Pipe.h"
-#include
-
-const int Pipe::READ_FILE_DESCRIPTOR_IDX = 0;
-const int Pipe::WRITE_FILE_DESCRIPTOR_IDX = 1;
-
-Pipe::Pipe() {
- pipe(p);
- open = true;
-}
-
-Pipe::~Pipe() {
- ::close(p[Pipe::READ_FILE_DESCRIPTOR_IDX]);
- ::close(p[Pipe::WRITE_FILE_DESCRIPTOR_IDX]);
-}
-
-void Pipe::close() {
- open = false;
-}
diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.h b/hadoop-hdds/rocks-native/src/main/native/Pipe.h
deleted file mode 100644
index aa75c6311cbc..000000000000
--- a/hadoop-hdds/rocks-native/src/main/native/Pipe.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ROCKS_NATIVE_PIPE_H
-#define ROCKS_NATIVE_PIPE_H
-
-#include
-
-class Pipe {
- public:
- static const int READ_FILE_DESCRIPTOR_IDX;
- static const int WRITE_FILE_DESCRIPTOR_IDX;
- Pipe();
- ~Pipe();
- void close();
- int getReadFd() {
- return getPipeFileDescriptorIndex(READ_FILE_DESCRIPTOR_IDX);
- }
-
- int getWriteFd() {
- return getPipeFileDescriptorIndex(WRITE_FILE_DESCRIPTOR_IDX);
- }
-
- int getPipeFileDescriptorIndex(int idx) {
- return p[idx];
- }
-
- bool isOpen() {
- return open;
- }
-
-
- private:
- int p[2];
- FILE* wr;
- bool open;
-
-};
-
-#endif //ROCKS_NATIVE_PIPE_H
diff --git a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp b/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp
deleted file mode 100644
index 53f60cdd65af..000000000000
--- a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include
-#include
-#include "Pipe.h"
-#include "cplusplus_to_java_convert.h"
-#include "org_apache_hadoop_hdds_utils_db_managed_PipeInputStream.h"
-
-
-jlong Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_newPipe(JNIEnv *, jobject) {
- Pipe *pipe = new Pipe();
- return GET_CPLUSPLUS_POINTER(pipe);
-}
-
-jint Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_readInternal(JNIEnv *env, jobject object, jbyteArray jbyteArray, jint capacity, jlong nativeHandle) {
- int cap_int = capacity;
- Pipe *pipe = reinterpret_cast<Pipe *>(nativeHandle);
- jbyte *b = (env)->GetByteArrayElements(jbyteArray, JNI_FALSE);
- cap_int = read(pipe->getReadFd(), b, cap_int);
- if (cap_int == 0) {
- if (!pipe->isOpen()) {
- cap_int = -1;
- }
- }
- env->ReleaseByteArrayElements(jbyteArray, b, 0);
- return cap_int;
-}
-
-void Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_closeInternal(JNIEnv *env, jobject object, jlong nativeHandle) {
- delete reinterpret_cast<Pipe *>(nativeHandle);
-}
-
diff --git a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp b/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp
deleted file mode 100644
index 285c5906c2d8..000000000000
--- a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool.h"
-#include "rocksdb/options.h"
-#include "rocksdb/sst_dump_tool.h"
-#include
-#include "cplusplus_to_java_convert.h"
-#include "Pipe.h"
-#include
-
-jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool_runInternal(JNIEnv *env, jobject obj,
- jobjectArray argsArray, jlong optionsHandle, jlong pipeHandle) {
- ROCKSDB_NAMESPACE::SSTDumpTool dumpTool;
- ROCKSDB_NAMESPACE::Options options;
- Pipe *pipe = reinterpret_cast<Pipe *>(pipeHandle);
- int length = env->GetArrayLength(argsArray);
- char *args[length + 1];
- for (int i = 0; i < length; i++) {
- jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)i);
- char *utf_str = (char *)env->GetStringUTFChars(str_val, JNI_FALSE);
- args[i + 1] = utf_str;
- }
- FILE *wr = fdopen(pipe->getWriteFd(), "w");
- int ret = dumpTool.Run(length + 1, args, options, wr);
- for (int i = 1; i < length + 1; i++) {
- jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)(i - 1));
- env->ReleaseStringUTFChars(str_val, args[i]);
- }
- fclose(wr);
- pipe->close();
- return ret;
-}
diff --git a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h
index efe9d4a5be24..4862ea12a1b9 100644
--- a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h
+++ b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
diff --git a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch
index 841c2533b863..12dc74614a45 100644
--- a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch
+++ b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch
@@ -16,592 +16,531 @@
* limitations under the License.
*/
-diff --git a/include/rocksdb/sst_dump_tool.h b/include/rocksdb/sst_dump_tool.h
-index 9261ba47d..1e62b88a3 100644
---- a/include/rocksdb/sst_dump_tool.h
-+++ b/include/rocksdb/sst_dump_tool.h
-@@ -11,7 +11,8 @@ namespace ROCKSDB_NAMESPACE {
-
- class SSTDumpTool {
- public:
-- int Run(int argc, char const* const* argv, Options options = Options());
-+ int Run(int argc, char const* const* argv, Options options = Options(),
-+ FILE* out = stdout, FILE* err = stderr);
- };
-
- } // namespace ROCKSDB_NAMESPACE
-diff --git a/table/sst_file_dumper.cc b/table/sst_file_dumper.cc
-index eefbaaeee..734a2f0dd 100644
---- a/table/sst_file_dumper.cc
-+++ b/table/sst_file_dumper.cc
-@@ -45,7 +45,7 @@ SstFileDumper::SstFileDumper(const Options& options,
- Temperature file_temp, size_t readahead_size,
- bool verify_checksum, bool output_hex,
- bool decode_blob_index, const EnvOptions& soptions,
-- bool silent)
-+ bool silent, FILE* out, FILE* err)
- : file_name_(file_path),
- read_num_(0),
- file_temp_(file_temp),
-@@ -57,10 +57,13 @@ SstFileDumper::SstFileDumper(const Options& options,
- ioptions_(options_),
- moptions_(ColumnFamilyOptions(options_)),
- read_options_(verify_checksum, false),
-- internal_comparator_(BytewiseComparator()) {
-+ internal_comparator_(BytewiseComparator()),
-+ out_(out),
-+ err_(err)
-+ {
- read_options_.readahead_size = readahead_size;
- if (!silent_) {
-- fprintf(stdout, "Process %s\n", file_path.c_str());
-+ fprintf(out_, "Process %s\n", file_path.c_str());
- }
- init_result_ = GetTableReader(file_name_);
- }
-@@ -253,17 +256,17 @@ Status SstFileDumper::ShowAllCompressionSizes(
- int32_t compress_level_from, int32_t compress_level_to,
- uint32_t max_dict_bytes, uint32_t zstd_max_train_bytes,
- uint64_t max_dict_buffer_bytes, bool use_zstd_dict_trainer) {
-- fprintf(stdout, "Block Size: %" ROCKSDB_PRIszt "\n", block_size);
-+ fprintf(out_, "Block Size: %" ROCKSDB_PRIszt "\n", block_size);
- for (auto& i : compression_types) {
- if (CompressionTypeSupported(i.first)) {
-- fprintf(stdout, "Compression: %-24s\n", i.second);
-+ fprintf(out_, "Compression: %-24s\n", i.second);
- CompressionOptions compress_opt;
- compress_opt.max_dict_bytes = max_dict_bytes;
- compress_opt.zstd_max_train_bytes = zstd_max_train_bytes;
- compress_opt.max_dict_buffer_bytes = max_dict_buffer_bytes;
- compress_opt.use_zstd_dict_trainer = use_zstd_dict_trainer;
- for (int32_t j = compress_level_from; j <= compress_level_to; j++) {
-- fprintf(stdout, "Compression level: %d", j);
-+ fprintf(out_, "Compression level: %d", j);
- compress_opt.level = j;
- Status s = ShowCompressionSize(block_size, i.first, compress_opt);
- if (!s.ok()) {
-@@ -271,7 +274,7 @@ Status SstFileDumper::ShowAllCompressionSizes(
- }
- }
- } else {
-- fprintf(stdout, "Unsupported compression type: %s.\n", i.second);
-+ fprintf(err_, "Unsupported compression type: %s.\n", i.second);
- }
- }
- return Status::OK();
-@@ -307,9 +310,9 @@ Status SstFileDumper::ShowCompressionSize(
- }
-
- std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
-- fprintf(stdout, " Size: %10" PRIu64, file_size);
-- fprintf(stdout, " Blocks: %6" PRIu64, num_data_blocks);
-- fprintf(stdout, " Time Taken: %10s microsecs",
-+ fprintf(out_, " Size: %10" PRIu64, file_size);
-+ fprintf(out_, " Blocks: %6" PRIu64, num_data_blocks);
-+ fprintf(out_, " Time Taken: %10s microsecs",
- std::to_string(
- std::chrono::duration_cast(end - start)
- .count())
-@@ -342,11 +345,11 @@ Status SstFileDumper::ShowCompressionSize(
- : ((static_cast<double>(not_compressed_blocks) /
- static_cast<double>(num_data_blocks)) *
- 100.0);
-- fprintf(stdout, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks,
-+ fprintf(out_, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks,
- compressed_pcnt);
-- fprintf(stdout, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)",
-+ fprintf(out_, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)",
- ratio_not_compressed_blocks, ratio_not_compressed_pcnt);
-- fprintf(stdout, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n",
-+ fprintf(out_, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n",
- not_compressed_blocks, not_compressed_pcnt);
- return Status::OK();
- }
-@@ -362,7 +365,7 @@ Status SstFileDumper::ReadTableProperties(uint64_t table_magic_number,
- /* memory_allocator= */ nullptr, prefetch_buffer);
- if (!s.ok()) {
- if (!silent_) {
-- fprintf(stdout, "Not able to read table properties\n");
-+ fprintf(err_, "Not able to read table properties\n");
- }
- }
- return s;
-@@ -410,7 +413,7 @@ Status SstFileDumper::SetTableOptionsByMagicNumber(
-
- options_.table_factory.reset(NewPlainTableFactory(plain_table_options));
- if (!silent_) {
-- fprintf(stdout, "Sst file format: plain table\n");
-+ fprintf(out_, "Sst file format: plain table\n");
- }
- } else {
- char error_msg_buffer[80];
-@@ -427,15 +430,56 @@ Status SstFileDumper::SetOldTableOptions() {
- assert(table_properties_ == nullptr);
- options_.table_factory = std::make_shared<BlockBasedTableFactory>();
- if (!silent_) {
-- fprintf(stdout, "Sst file format: block-based(old version)\n");
-+ fprintf(out_, "Sst file format: block-based(old version)\n");
- }
-
- return Status::OK();
- }
-
-+void write(int value, FILE* file) {
-+ char b[4];
-+ b[3] = value & 0x000000ff;
-+ b[2] = (value & 0x0000ff00) >> 8;
-+ b[1] = (value & 0x00ff0000) >> 16;
-+ b[0] = (value & 0xff000000) >> 24;
-+ std::fwrite(b, 4, 1, file);
+diff --git a/include/rocksdb/raw_iterator.h b/include/rocksdb/raw_iterator.h
+new file mode 100644
+index 000000000..21242ed15
+--- /dev/null
++++ b/include/rocksdb/raw_iterator.h
+@@ -0,0 +1,25 @@
++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
++// This source code is licensed under both the GPLv2 (found in the
++// COPYING file in the root directory) and Apache 2.0 License
++// (found in the LICENSE.Apache file in the root directory).
++#pragma once
++#ifndef ROCKSDB_LITE
++
++
++#include "rocksdb/advanced_options.h"
++namespace ROCKSDB_NAMESPACE {
++
++class RawIterator {
++ public:
++ virtual ~RawIterator() {}
++ virtual bool Valid() const = 0;
++ virtual Slice key() const = 0;
++ virtual Slice value() const = 0;
++ virtual uint64_t sequenceNumber() const = 0;
++ virtual uint32_t type() const = 0;
++ virtual void Next() = 0;
++};
++
++} // namespace ROCKSDB_NAMESPACE
++
++#endif // ROCKSDB_LITE
+diff --git a/include/rocksdb/raw_sst_file_reader.h b/include/rocksdb/raw_sst_file_reader.h
+new file mode 100644
+index 000000000..09e748208
+--- /dev/null
++++ b/include/rocksdb/raw_sst_file_reader.h
+@@ -0,0 +1,62 @@
++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
++// This source code is licensed under both the GPLv2 (found in the
++// COPYING file in the root directory) and Apache 2.0 License
++// (found in the LICENSE.Apache file in the root directory).
++#pragma once
++#ifndef ROCKSDB_LITE
++
++#include
++#include
++
++#include "rocksdb/raw_iterator.h"
++#include "rocksdb/advanced_options.h"
++#include "rocksdb/options.h"
++
++
++
++namespace ROCKSDB_NAMESPACE {
++
++class RawSstFileReader {
++ public:
++
++ RawSstFileReader(const Options& options, const std::string& file_name,
++ size_t readahead_size, bool verify_checksum,
++ bool silent = false);
++ ~RawSstFileReader();
++
++ RawIterator* newIterator(bool has_from, Slice* from,
++ bool has_to, Slice *to);
++ Status getStatus() { return init_result_; }
++
++ private:
++ // Get the TableReader implementation for the sst file
++ Status GetTableReader(const std::string& file_path);
++ Status ReadTableProperties(uint64_t table_magic_number,
++ uint64_t file_size);
++
++ Status SetTableOptionsByMagicNumber(uint64_t table_magic_number);
++ Status SetOldTableOptions();
++
++ // Helper function to call the factory with settings specific to the
++ // factory implementation
++ Status NewTableReader(uint64_t file_size);
++
++ std::string file_name_;
++ Temperature file_temp_;
++
++ // less verbose in stdout/stderr
++ bool silent_;
++
++ // options_ and internal_comparator_ will also be used in
++ // ReadSequential internally (specifically, seek-related operations)
++ Options options_;
++
++ Status init_result_;
++
++ struct Rep;
++ std::unique_ptr<Rep> rep_;
++};
++
++} // namespace ROCKSDB_NAMESPACE
++
++#endif // ROCKSDB_LITE
+diff --git a/src.mk b/src.mk
+index b94bc43ca..c13e5cde6 100644
+--- a/src.mk
++++ b/src.mk
+@@ -338,11 +338,8 @@ RANGE_TREE_SOURCES =\
+ utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc
+
+ TOOL_LIB_SOURCES = \
+- tools/io_tracer_parser_tool.cc \
+- tools/ldb_cmd.cc \
+- tools/ldb_tool.cc \
+- tools/sst_dump_tool.cc \
+- utilities/blob_db/blob_dump_tool.cc \
++ tools/raw_sst_file_reader.cc \
++ tools/raw_sst_file_iterator.cc \
+
+ ANALYZER_LIB_SOURCES = \
+ tools/block_cache_analyzer/block_cache_trace_analyzer.cc \
+diff --git a/tools/raw_sst_file_iterator.cc b/tools/raw_sst_file_iterator.cc
+new file mode 100644
+index 000000000..3051637a3
+--- /dev/null
++++ b/tools/raw_sst_file_iterator.cc
+@@ -0,0 +1,76 @@
++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
++// This source code is licensed under both the GPLv2 (found in the
++// COPYING file in the root directory) and Apache 2.0 License
++// (found in the LICENSE.Apache file in the root directory).
++//
++#ifndef ROCKSDB_LITE
++
++
++#include
++#include
++
++#include "db/memtable.h"
++#include "db/write_batch_internal.h"
++#include "rocksdb/status.h"
++#include "rocksdb/utilities/ldb_cmd.h"
++#include "table/block_based/block.h"
++#include "table/block_based/block_based_table_builder.h"
++#include "table/block_based/block_based_table_factory.h"
++#include "table/meta_blocks.h"
++#include "table/plain/plain_table_factory.h"
++#include "tools/raw_sst_file_iterator.h"
++
++namespace ROCKSDB_NAMESPACE {
++
++RawSstFileIterator::RawSstFileIterator(InternalIterator* iterator,
++ bool has_from, Slice* from_key,
++ bool has_to, Slice* to_key)
++ : iter_(iterator),
++ ikey(new ParsedInternalKey()),
++ has_to_(has_to),
++ to_key_(to_key) {
++ if (has_from) {
++ InternalKey k;
++ k.SetMinPossibleForUserKey(*from_key);
++ iter_->Seek(k.Encode());
++ } else {
++ iter_->SeekToFirst();
++ }
++ initKey();
++}
++
++bool RawSstFileIterator::Valid() const {
++ return iter_->Valid() && (!has_to_ ||
++ BytewiseComparator()->Compare(
++ key(), *to_key_) < 0);
++}
++
++void RawSstFileIterator::initKey() {
++ if (iter_->Valid()) {
++ ParseInternalKey(iter_->key(), ikey, true /* log_err_key */);
++ }
++}
++
++void RawSstFileIterator::Next() {
++ iter_->Next();
++ initKey();
++}
++
++Slice RawSstFileIterator::key() const {
++ return ikey->user_key;
++}
++
++uint64_t RawSstFileIterator::sequenceNumber() const {
++ return ikey->sequence;
++}
++
++uint32_t RawSstFileIterator::type() const {
++ return static_cast<uint32_t>(ikey->type);
++}
++
++Slice RawSstFileIterator::value() const {
++ return iter_->value();
++}
++} // namespace ROCKSDB_NAMESPACE
++
++#endif // ROCKSDB_LITE
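One behavioural detail implied by the implementation above: the constructor seeks to the minimum possible internal key for from_key, and Valid() only holds while the user key compares strictly below to_key under the bytewise comparator, so the iterator covers the half-open range [from, to). A small self-contained check of that predicate, using hypothetical keys (not part of the patch):

// Illustrative only; not added by the patch.
#include <cassert>

#include "rocksdb/comparator.h"
#include "rocksdb/slice.h"

int main() {
  const ROCKSDB_NAMESPACE::Comparator* cmp =
      ROCKSDB_NAMESPACE::BytewiseComparator();
  ROCKSDB_NAMESPACE::Slice to("d");
  // Mirrors the Valid() predicate: only keys strictly below `to` pass.
  assert(cmp->Compare(ROCKSDB_NAMESPACE::Slice("c"), to) < 0);    // kept
  assert(!(cmp->Compare(ROCKSDB_NAMESPACE::Slice("d"), to) < 0)); // excluded
  return 0;
}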
+diff --git a/tools/raw_sst_file_iterator.h b/tools/raw_sst_file_iterator.h
+new file mode 100644
+index 000000000..58e34b260
+--- /dev/null
++++ b/tools/raw_sst_file_iterator.h
+@@ -0,0 +1,46 @@
++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
++// This source code is licensed under both the GPLv2 (found in the
++// COPYING file in the root directory) and Apache 2.0 License
++// (found in the LICENSE.Apache file in the root directory).
++#pragma once
++#ifndef ROCKSDB_LITE
+
-+void write(const char* value, int length, FILE* file) {
-+ write(length, file);
-+ fwrite(value, length, 1, file);
++#include
++#include
++#include "file/writable_file_writer.h"
++#include "rocksdb/advanced_options.h"
++#include "rocksdb/raw_iterator.h"
++
++namespace ROCKSDB_NAMESPACE {
++
++class RawSstFileIterator : public RawIterator {
++ public:
++ explicit RawSstFileIterator(InternalIterator* iterator,
++ bool has_from,
++ Slice* from_key,
++ bool has_to,
++ Slice* to_key);
++
++ bool Valid() const override;
++ Slice key() const override;
++ Slice value() const override;
++ uint64_t sequenceNumber() const override;
++ uint32_t type() const override;
++ void Next() final override;
++
++ ~RawSstFileIterator() {
++ delete iter_;
++ delete ikey; // allocated in the constructor and owned by this wrapper
++ }
++
++ private:
++ void initKey();
++ InternalIterator* iter_;
++ ParsedInternalKey* ikey;
++ bool has_to_;
++ Slice* to_key_;
++};
++
++} // namespace ROCKSDB_NAMESPACE
++
++#endif // ROCKSDB_LITE
+diff --git a/tools/raw_sst_file_reader.cc b/tools/raw_sst_file_reader.cc
+new file mode 100644
+index 000000000..5ba8a82ee
+--- /dev/null
++++ b/tools/raw_sst_file_reader.cc
+@@ -0,0 +1,272 @@
++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
++// This source code is licensed under both the GPLv2 (found in the
++// COPYING file in the root directory) and Apache 2.0 License
++// (found in the LICENSE.Apache file in the root directory).
++//
++#ifndef ROCKSDB_LITE
++
++#include "rocksdb/raw_sst_file_reader.h"
++
++#include
++#include
++#include
+
+ org.codehaus.mojo
+ properties-maven-plugin
+ ${properties.maven.plugin.version}
+
+
+ org.apache.maven
+ maven-core
+ ${maven.core.version}
+
+
+
From 900bd4c145121eb1558883f01f9fd30d5fbbc615 Mon Sep 17 00:00:00 2001
From: Sarveksha Yeshavantha Raju
<79865743+sarvekshayr@users.noreply.github.com>
Date: Tue, 27 Feb 2024 20:03:44 +0800
Subject: [PATCH 3/9] HDDS-815. Rename HDDS config keys prefixed with dfs.
(#6274)
(cherry picked from commit 7939faf7d6c904bf1e4ad32baa5d6d0c1de19003)
---
.../hadoop/hdds/conf/OzoneConfiguration.java | 63 +++++++++++++++-
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 38 +++++-----
.../apache/hadoop/ozone/OzoneConfigKeys.java | 22 +++---
.../src/main/resources/ozone-default.xml | 71 ++++++++++---------
.../feature/Streaming-Write-Pipeline.md | 4 +-
.../runConfigurations/Datanode2-ha.xml | 2 +-
.../intellij/runConfigurations/Datanode2.xml | 2 +-
.../runConfigurations/Datanode3-ha.xml | 2 +-
.../intellij/runConfigurations/Datanode3.xml | 2 +-
.../src/main/compose/ozone-ha/docker-config | 2 +-
.../dist/src/main/compose/ozone/docker-config | 2 +-
.../main/compose/ozonesecure-ha/docker-config | 2 +-
.../main/compose/ozonesecure/docker-config | 2 +-
.../src/test/resources/ozone-site.xml | 5 +-
14 files changed, 143 insertions(+), 76 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index 10c2189f5649..85a1f74e56e4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.ratis.server.RaftServerConfigKeys;
import static java.util.Collections.unmodifiableSortedSet;
@@ -332,7 +333,67 @@ private static void addDeprecatedKeys() {
new DeprecationDelta("ozone.scm.chunk.layout",
ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY),
new DeprecationDelta("hdds.datanode.replication.work.dir",
- OZONE_CONTAINER_COPY_WORKDIR)
+ OZONE_CONTAINER_COPY_WORKDIR),
+ new DeprecationDelta("dfs.container.chunk.write.sync",
+ OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY),
+ new DeprecationDelta("dfs.container.ipc",
+ OzoneConfigKeys.DFS_CONTAINER_IPC_PORT),
+ new DeprecationDelta("dfs.container.ipc.random.port",
+ OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT),
+ new DeprecationDelta("dfs.container.ratis.admin.port",
+ OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT),
+ new DeprecationDelta("dfs.container.ratis.datanode.storage.dir",
+ OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR),
+ new DeprecationDelta("dfs.container.ratis.datastream.enabled",
+ OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED),
+ new DeprecationDelta("dfs.container.ratis.datastream.port",
+ OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT),
+ new DeprecationDelta("dfs.container.ratis.datastream.random.port",
+ OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT),
+ new DeprecationDelta("dfs.container.ratis.enabled",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY),
+ new DeprecationDelta("dfs.container.ratis.ipc",
+ OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT),
+ new DeprecationDelta("dfs.container.ratis.ipc.random.port",
+ OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT),
+ new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT),
+ new DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT),
+ new DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS),
+ new DeprecationDelta("dfs.container.ratis.log.purge.gap",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP),
+ new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT),
+ new DeprecationDelta("dfs.container.ratis.log.queue.num-elements",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS),
+ new DeprecationDelta("dfs.container.ratis.num.container.op.executors",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY),
+ new DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME),
+ new DeprecationDelta("dfs.container.ratis.replication.level",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY),
+ new DeprecationDelta("dfs.container.ratis.rpc.type",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY),
+ new DeprecationDelta("dfs.container.ratis.segment.preallocated.size",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY),
+ new DeprecationDelta("dfs.container.ratis.segment.size",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY),
+ new DeprecationDelta("dfs.container.ratis.server.port",
+ OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT),
+ new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES),
+ new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT),
+ new DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions",
+ ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS),
+ new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration",
+ ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY),
+ new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration",
+ ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY),
+ new DeprecationDelta("dfs.ratis.snapshot.threshold",
+ ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY)
});
}
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 007dc3dfaef8..94b58890efdc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -42,93 +42,93 @@ public final class ScmConfigKeys {
"ozone.scm.db.dirs.permissions";
public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
- = "dfs.container.ratis.enabled";
+ = "hdds.container.ratis.enabled";
public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
= false;
public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
- = "dfs.container.ratis.rpc.type";
+ = "hdds.container.ratis.rpc.type";
public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
= "GRPC";
public static final String
DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME
- = "dfs.container.ratis.num.write.chunk.threads.per.volume";
+ = "hdds.container.ratis.num.write.chunk.threads.per.volume";
public static final int
DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT
= 10;
public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
- = "dfs.container.ratis.replication.level";
+ = "hdds.container.ratis.replication.level";
public static final ReplicationLevel
DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY;
public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
- = "dfs.container.ratis.num.container.op.executors";
+ = "hdds.container.ratis.num.container.op.executors";
public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
= 10;
public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
- "dfs.container.ratis.segment.size";
+ "hdds.container.ratis.segment.size";
public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
"64MB";
public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
- "dfs.container.ratis.segment.preallocated.size";
+ "hdds.container.ratis.segment.preallocated.size";
public static final String
DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB";
public static final String
DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
- "dfs.container.ratis.statemachinedata.sync.timeout";
+ "hdds.container.ratis.statemachinedata.sync.timeout";
public static final TimeDuration
DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
TimeDuration.valueOf(10, TimeUnit.SECONDS);
public static final String
DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
- "dfs.container.ratis.statemachinedata.sync.retries";
+ "hdds.container.ratis.statemachinedata.sync.retries";
public static final String
DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS =
- "dfs.container.ratis.statemachine.max.pending.apply-transactions";
+ "hdds.container.ratis.statemachine.max.pending.apply-transactions";
// The default value of maximum number of pending state machine apply
// transactions is kept same as default snapshot threshold.
public static final int
DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT =
100000;
public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
- "dfs.container.ratis.log.queue.num-elements";
+ "hdds.container.ratis.log.queue.num-elements";
public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
1024;
public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
- "dfs.container.ratis.log.queue.byte-limit";
+ "hdds.container.ratis.log.queue.byte-limit";
public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT =
"4GB";
public static final String
DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
- "dfs.container.ratis.log.appender.queue.num-elements";
+ "hdds.container.ratis.log.appender.queue.num-elements";
public static final int
DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1;
public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT =
- "dfs.container.ratis.log.appender.queue.byte-limit";
+ "hdds.container.ratis.log.appender.queue.byte-limit";
public static final String
DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP =
- "dfs.container.ratis.log.purge.gap";
+ "hdds.container.ratis.log.purge.gap";
// TODO: Set to 1024 once RATIS issue around purge is fixed.
public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
1000000;
public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT =
- "dfs.container.ratis.leader.pending.bytes.limit";
+ "hdds.container.ratis.leader.pending.bytes.limit";
public static final String
DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB";
public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
- "dfs.ratis.server.retry-cache.timeout.duration";
+ "hdds.ratis.server.retry-cache.timeout.duration";
public static final TimeDuration
DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
public static final String
DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
- "dfs.ratis.leader.election.minimum.timeout.duration";
+ "hdds.ratis.leader.election.minimum.timeout.duration";
public static final TimeDuration
DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
TimeDuration.valueOf(5, TimeUnit.SECONDS);
public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
- "dfs.ratis.snapshot.threshold";
+ "hdds.ratis.snapshot.threshold";
public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
// TODO : this is copied from OzoneConsts, may need to move to a better place
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 21c89cc3c8d4..293401f4c275 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -37,7 +37,7 @@
@InterfaceStability.Unstable
public final class OzoneConfigKeys {
public static final String DFS_CONTAINER_IPC_PORT =
- "dfs.container.ipc";
+ "hdds.container.ipc.port";
public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
@@ -60,47 +60,47 @@ public final class OzoneConfigKeys {
* as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}.
*/
public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
- "dfs.container.ipc.random.port";
+ "hdds.container.ipc.random.port";
public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
false;
public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT =
- "dfs.container.ratis.datastream.random.port";
+ "hdds.container.ratis.datastream.random.port";
public static final boolean
DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT =
false;
public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
- "dfs.container.chunk.write.sync";
+ "hdds.container.chunk.write.sync";
public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false;
/**
* Ratis Port where containers listen to.
*/
public static final String DFS_CONTAINER_RATIS_IPC_PORT =
- "dfs.container.ratis.ipc";
+ "hdds.container.ratis.ipc.port";
public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
/**
* Ratis Port where containers listen to admin requests.
*/
public static final String DFS_CONTAINER_RATIS_ADMIN_PORT =
- "dfs.container.ratis.admin.port";
+ "hdds.container.ratis.admin.port";
public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857;
/**
* Ratis Port where containers listen to server-to-server requests.
*/
public static final String DFS_CONTAINER_RATIS_SERVER_PORT =
- "dfs.container.ratis.server.port";
+ "hdds.container.ratis.server.port";
public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856;
/**
* Ratis Port where containers listen to datastream requests.
*/
public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED
- = "dfs.container.ratis.datastream.enabled";
+ = "hdds.container.ratis.datastream.enabled";
public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT
= false;
public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT
- = "dfs.container.ratis.datastream.port";
+ = "hdds.container.ratis.datastream.port";
public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT
= 9855;
@@ -134,7 +134,7 @@ public final class OzoneConfigKeys {
* a mini cluster is able to launch multiple containers on a node.
*/
public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
- "dfs.container.ratis.ipc.random.port";
+ "hdds.container.ratis.ipc.random.port";
public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
false;
public static final String OZONE_TRACE_ENABLED_KEY =
@@ -371,7 +371,7 @@ public final class OzoneConfigKeys {
ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
- "dfs.container.ratis.datanode.storage.dir";
+ "hdds.container.ratis.datanode.storage.dir";
public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY;
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index e9b74177e1c9..4cc32eb336c3 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -46,26 +46,26 @@