diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 7d46b01a6dbf..212931142f83 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -79,15 +79,6 @@ public final class OzoneConfigKeys {
       "ozone.trace.enabled";
   public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;

-  public static final String OZONE_METADATA_STORE_IMPL =
-      "ozone.metastore.impl";
-  public static final String OZONE_METADATA_STORE_IMPL_LEVELDB =
-      "LevelDB";
-  public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB =
-      "RocksDB";
-  public static final String OZONE_METADATA_STORE_IMPL_DEFAULT =
-      OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
   public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
       "ozone.metastore.rocksdb.statistics";

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index a6833a5ee990..ea0466f6cea5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -360,4 +360,6 @@ private OzoneConsts() {

   public static final String TRANSACTION_INFO_KEY = "#TRANSACTIONINFO";
   public static final String TRANSACTION_INFO_SPLIT_KEY = "#";
+  public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB";
+  public static final String CONTAINER_DB_TYPE_LEVELDB = "LevelDB";
 }
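Note (not part of the patch): the two literals keep their old values of "RocksDB" and "LevelDB", so .container files written before this change still parse; only their home moves from OzoneConfigKeys to OzoneConsts. A minimal sketch of how a call site checks a DB type against the relocated constants, mirroring the KeyValueContainerCheck change later in this patch; the DbTypeCheck helper class itself is hypothetical:

    import java.io.IOException;

    import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB;
    import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB;

    // Hypothetical helper, shown only to illustrate use of the relocated constants.
    final class DbTypeCheck {
      private DbTypeCheck() { }

      // Rejects any DB type other than the two supported literals.
      static void validateDbType(String dbType) throws IOException {
        if (!CONTAINER_DB_TYPE_ROCKSDB.equals(dbType)
            && !CONTAINER_DB_TYPE_LEVELDB.equals(dbType)) {
          throw new IOException("Unknown DBType [" + dbType + "]");
        }
      }
    }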
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 8b724d9818e7..c72dad0cee93 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -634,17 +634,6 @@
       dfs.container.ratis.datanode.storage.dir be configured separately.
     </description>
   </property>
-  <property>
-    <name>ozone.metastore.impl</name>
-    <value>RocksDB</value>
-    <tag>OZONE, OM, SCM, CONTAINER, STORAGE</tag>
-    <description>
-      Ozone metadata store implementation. Ozone metadata are well
-      distributed to multiple services such as ozoneManager, scm. They are stored in
-      some local key-value databases. This property determines which database
-      library to use. Supported value is either LevelDB or RocksDB.
-    </description>
-  </property>
   <property>
     <name>ozone.metastore.rocksdb.statistics</name>
@@ -2291,15 +2280,6 @@
       Whether to enable topology aware read to improve the read performance.
     </description>
   </property>
-  <property>
-    <name>ozone.recon.container.db.impl</name>
-    <value>RocksDB</value>
-    <tag>OZONE, RECON, STORAGE</tag>
-    <description>
-      Ozone Recon container DB store implementation.Supported value is either
-      LevelDB or RocksDB.
-    </description>
-  </property>
   <property>
     <name>ozone.recon.om.db.dir</name>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 49b907f10ec5..a80841f60035 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -39,7 +39,6 @@
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
@@ -129,12 +128,8 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
       KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath,
           chunksPath, dbFile, config);

-      String impl = config.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-          OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
-
       //Set containerData for the KeyValueContainer.
       containerData.setChunksPath(chunksPath.getPath());
-      containerData.setContainerDBType(impl);
       containerData.setDbFile(dbFile);
       containerData.setVolume(containerVolume);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 95795e64c953..d6c4ff0c5575 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -47,8 +47,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB;

 /**
  * Class to run integrity checks on Datanode Containers.
@@ -186,8 +186,8 @@ private void checkContainerFile() throws IOException {
     }

     dbType = onDiskContainerData.getContainerDBType();
-    if (!dbType.equals(OZONE_METADATA_STORE_IMPL_ROCKSDB) &&
-        !dbType.equals(OZONE_METADATA_STORE_IMPL_LEVELDB)) {
+    if (!dbType.equals(CONTAINER_DB_TYPE_ROCKSDB) &&
+        !dbType.equals(CONTAINER_DB_TYPE_LEVELDB)) {
       String errStr = "Unknown DBType [" + dbType
           + "] in Container File for [" + containerID + "]";
       throw new IOException(errStr);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 373b3223a68d..5698d7267882 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicInteger;

 import static java.lang.Math.max;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB;
 import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY;
 import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
 import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_BYTES_USED_KEY;
@@ -65,7 +66,7 @@ public class KeyValueContainerData extends ContainerData {
   private String metadataPath;

   //Type of DB used to store key to chunks mapping
-  private String containerDBType;
+  private String containerDBType = CONTAINER_DB_TYPE_ROCKSDB;

   private File dbFile = null;
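Note (not part of the patch): initializing the field replaces the configuration lookup that KeyValueContainer.create() performed before this change, so a freshly built KeyValueContainerData reports RocksDB unless something overrides it, for example when an older .container file that recorded LevelDB is parsed back. A hedged sketch of that invariant; how the containerData argument is constructed is left out on purpose, since the constructor is not shown in this patch:

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.ozone.OzoneConsts;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

    // Illustrative check only, not a test from this patch.
    final class ContainerDbTypeDefaultCheck {
      static void checkDefault(KeyValueContainerData containerData) {
        // New default: RocksDB, with no configuration lookup involved.
        assertEquals(OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB,
            containerData.getContainerDBType());

        // Pre-existing LevelDB containers still override the default when
        // their .container file is loaded.
        containerData.setContainerDBType(OzoneConsts.CONTAINER_DB_TYPE_LEVELDB);
        assertEquals(OzoneConsts.CONTAINER_DB_TYPE_LEVELDB,
            containerData.getContainerDBType());
      }
    }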
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 62097b38e8b3..aff0528bdef4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -45,9 +45,6 @@ import com.google.common.primitives.Longs;

 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
 import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
 import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
 import org.junit.After;
@@ -71,22 +68,18 @@ public class TestKeyValueBlockIterator {
   private OzoneConfiguration conf;
   private File testRoot;

-  private final String storeImpl;
   private final ChunkLayOutVersion layout;

-  public TestKeyValueBlockIterator(String metadataImpl,
-      ChunkLayOutVersion layout) {
-    this.storeImpl = metadataImpl;
+  public TestKeyValueBlockIterator(ChunkLayOutVersion layout) {
     this.layout = layout;
   }

   @Parameterized.Parameters
   public static Collection data() {
     return Arrays.asList(new Object[][] {
-        {OZONE_METADATA_STORE_IMPL_LEVELDB, FILE_PER_CHUNK},
-        {OZONE_METADATA_STORE_IMPL_ROCKSDB, FILE_PER_CHUNK},
-        {OZONE_METADATA_STORE_IMPL_LEVELDB, FILE_PER_BLOCK},
-        {OZONE_METADATA_STORE_IMPL_ROCKSDB, FILE_PER_BLOCK}});
+        {FILE_PER_CHUNK},
+        {FILE_PER_BLOCK}
+    });
   }

   @Before
@@ -94,7 +87,6 @@ public void setUp() throws Exception {
     testRoot = GenericTestUtils.getRandomizedTestDir();
     conf = new OzoneConfiguration();
     conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
-    conf.set(OZONE_METADATA_STORE_IMPL, storeImpl);
     volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf);
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
index cb8ef3406c63..4583a54f5c54 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
@@ -58,10 +58,6 @@ import java.util.UUID;

 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -76,7 +72,6 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(TestKeyValueContainerCheck.class);

-  private final String storeImpl;
   private final ChunkLayoutTestInfo chunkManagerTestInfo;
   private KeyValueContainer container;
   private KeyValueContainerData containerData;
@@ -85,28 +80,22 @@
   private File testRoot;
   private ChunkManager chunkManager;

-  public TestKeyValueContainerCheck(String metadataImpl,
-      ChunkLayoutTestInfo chunkManagerTestInfo) {
-    this.storeImpl = metadataImpl;
+  public TestKeyValueContainerCheck(ChunkLayoutTestInfo chunkManagerTestInfo) {
     this.chunkManagerTestInfo = chunkManagerTestInfo;
   }

   @Parameterized.Parameters
   public static Collection data() {
     return Arrays.asList(new Object[][] {
-        {OZONE_METADATA_STORE_IMPL_LEVELDB, ChunkLayoutTestInfo.FILE_PER_CHUNK},
-        {OZONE_METADATA_STORE_IMPL_LEVELDB, ChunkLayoutTestInfo.FILE_PER_BLOCK},
-        {OZONE_METADATA_STORE_IMPL_ROCKSDB, ChunkLayoutTestInfo.FILE_PER_CHUNK},
-        {OZONE_METADATA_STORE_IMPL_ROCKSDB, ChunkLayoutTestInfo.FILE_PER_BLOCK}
+        {ChunkLayoutTestInfo.FILE_PER_CHUNK},
+        {ChunkLayoutTestInfo.FILE_PER_BLOCK}
     });
   }

   @Before
   public void setUp() throws Exception {
-    LOG.info("Testing store:{} layout:{}",
-        storeImpl, chunkManagerTestInfo.getLayout());
+    LOG.info("Testing layout:{}", chunkManagerTestInfo.getLayout());
     this.testRoot = GenericTestUtils.getRandomizedTestDir();
     conf = new OzoneConfiguration();
     conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
-    conf.set(OZONE_METADATA_STORE_IMPL, storeImpl);
     chunkManagerTestInfo.updateConfig(conf);
     volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf);
     chunkManager = chunkManagerTestInfo.createChunkManager(true, null);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java
index f5088683efff..8998f6c9d6d4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java
@@ -42,6 +42,7 @@
 /**
  * LevelDB interface.
  */
+@Deprecated
 public class LevelDBStore implements MetadataStore {

   private static final Logger LOG =
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
index f5b6769b70de..0a7abe33a60e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
@@ -25,6 +25,7 @@
 /**
  * LevelDB store iterator.
  */
+@Deprecated
 public class LevelDBStoreIterator
     implements MetaStoreIterator {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
index d697fdfaccda..5e1a5a8dad9a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
@@ -26,14 +26,14 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;

 import com.google.common.annotations.VisibleForTesting;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB;
+
 import org.iq80.leveldb.Options;
 import org.rocksdb.BlockBasedTableConfig;
 import org.rocksdb.Statistics;
@@ -104,22 +104,19 @@ public MetadataStore build() throws IOException {
     final ConfigurationSource conf = optionalConf.orElse(DEFAULT_CONF);

     if (dbType == null) {
-      LOG.debug("dbType is null, using ");
-      dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-          OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
-      LOG.debug("dbType is null, using dbType {} from ozone configuration",
-          dbType);
+      dbType = CONTAINER_DB_TYPE_ROCKSDB;
+      LOG.debug("dbType is null, using dbType {}.", dbType);
     } else {
       LOG.debug("Using dbType {} for metastore", dbType);
     }
-    if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) {
+    if (CONTAINER_DB_TYPE_LEVELDB.equals(dbType)) {
       Options options = new Options();
       options.createIfMissing(createIfMissing);
       if (cacheSize > 0) {
         options.cacheSize(cacheSize);
       }
       return new LevelDBStore(dbFile, options);
-    } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
+    } else if (CONTAINER_DB_TYPE_ROCKSDB.equals(dbType)) {
       org.rocksdb.Options opts;
       // Used cached options if config object passed down is the same
       if (CACHED_OPTS.containsKey(conf)) {
@@ -147,10 +144,8 @@ public MetadataStore build() throws IOException {
       return new RocksDBStore(dbFile, opts);
     }

-    throw new IllegalArgumentException("Invalid argument for "
-        + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
-        + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB
-        + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB
-        + ", but met " + dbType);
+    throw new IllegalArgumentException("Invalid Container DB type. Expecting "
+        + CONTAINER_DB_TYPE_LEVELDB + " or "
+        + CONTAINER_DB_TYPE_ROCKSDB + ", but met " + dbType);
   }
 }
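Note (not part of the patch): with the configuration key gone, build() falls back to RocksDB whenever no type has been set, and LevelDB now has to be requested explicitly through setDBType(), the same pattern the TestMetadataStore changes later in this patch adopt. A minimal sketch of both paths; the wrapper class and the file locations are placeholders:

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.utils.MetadataStore;
    import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;

    import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB;

    final class MetadataStoreDefaults {
      static void openStores() throws IOException {
        OzoneConfiguration conf = new OzoneConfiguration();

        // No setDBType(): dbType stays null inside build(), so RocksDB is used.
        MetadataStore rocksStore = MetadataStoreBuilder.newBuilder()
            .setConf(conf)
            .setCreateIfMissing(true)
            .setDbFile(new File("/tmp/rocksdb-meta"))      // placeholder path
            .build();

        // LevelDB must now be asked for explicitly.
        MetadataStore levelStore = MetadataStoreBuilder.newBuilder()
            .setConf(conf)
            .setCreateIfMissing(true)
            .setDbFile(new File("/tmp/leveldb-meta"))      // placeholder path
            .setDBType(CONTAINER_DB_TYPE_LEVELDB)
            .build();

        rocksStore.close();
        levelStore.close();
      }
    }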
Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB - + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB - + ", but met " + dbType); + throw new IllegalArgumentException("Invalid Container DB type. Expecting " + + CONTAINER_DB_TYPE_LEVELDB + " or " + + CONTAINER_DB_TYPE_ROCKSDB + ", but met " + dbType); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java index 3eb832f21084..ed9bfb3851c3 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java @@ -32,28 +32,30 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import com.google.common.collect.Lists; -import static java.nio.charset.StandardCharsets.UTF_8; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.ImmutablePair; -import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; + import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import static org.junit.runners.Parameterized.Parameters; import org.slf4j.event.Level; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; +import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.runners.Parameterized.Parameters; +import static java.nio.charset.StandardCharsets.UTF_8; /** * Test class for ozone metadata store. */ @@ -74,14 +76,14 @@ public TestMetadataStore(String metadataImpl) { @Parameters public static Collection data() { return Arrays.asList(new Object[][] { - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB} + {CONTAINER_DB_TYPE_LEVELDB}, + {CONTAINER_DB_TYPE_ROCKSDB} }); } @Before public void init() throws IOException { - if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) { + if (CONTAINER_DB_TYPE_ROCKSDB.equals(storeImpl)) { // The initialization of RocksDB fails on Windows assumeNotWindows(); } @@ -90,12 +92,12 @@ public void init() throws IOException { + "-" + storeImpl.toLowerCase()); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); store = MetadataStoreBuilder.newBuilder() .setConf(conf) .setCreateIfMissing(true) .setDbFile(testDir) + .setDBType(storeImpl) .build(); // Add 20 entries. 
@@ -110,12 +112,13 @@ public void init() throws IOException {
   @Test
   public void testIterator() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
+
     File dbDir = GenericTestUtils.getRandomizedTestDir();
     MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
         .setConf(conf)
         .setCreateIfMissing(true)
         .setDbFile(dbDir)
+        .setDBType(storeImpl)
         .build();

     //As database is empty, check whether iterator is working as expected or
@@ -166,15 +169,15 @@ public void testIterator() throws Exception {
   public void testMetaStoreConfigDifferentFromType() throws IOException {

     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
+
     String dbType;
     GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
     GenericTestUtils.LogCapturer logCapturer =
         GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
-    if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
-      dbType = "RocksDB";
-    } else {
+    if (storeImpl.equals(CONTAINER_DB_TYPE_LEVELDB)) {
       dbType = "LevelDB";
+    } else {
+      dbType = "RocksDB";
     }

     File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
@@ -193,7 +196,7 @@ public void testMetaStoreConfigDifferentFromType() throws IOException {
   public void testdbTypeNotSet() throws IOException {

     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
+
     GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
     GenericTestUtils.LogCapturer logCapturer =
         GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
@@ -203,7 +206,7 @@ public void testdbTypeNotSet() throws IOException {
     MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)
         .setCreateIfMissing(true).setDbFile(dbDir).build();
     assertTrue(logCapturer.getOutput().contains("dbType is null, using dbType" +
-        " " + storeImpl));
+        " " + CONTAINER_DB_TYPE_ROCKSDB));
     dbStore.close();
     dbStore.destroy();
     FileUtils.deleteDirectory(dbDir);
@@ -213,8 +216,11 @@
   @After
   public void cleanup() throws IOException {
     if (store != null) {
+      System.out.println("--- Closing Store: " + store.getClass());
       store.close();
       store.destroy();
+    } else {
+      System.out.println("--- Store already closed");
     }
     if (testDir != null) {
       FileUtils.deleteDirectory(testDir);
@@ -460,7 +466,6 @@ public void testInvalidStartKey() throws IOException {
   public void testDestroyDB() throws IOException {
     // create a new DB to test db destroy
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
     File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
         + "-" + storeImpl.toLowerCase() + "-toDestroy");
@@ -468,6 +473,7 @@ public void testDestroyDB() throws IOException {
     MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
         .setConf(conf)
         .setCreateIfMissing(true)
         .setDbFile(dbDir)
+        .setDBType(storeImpl)
         .build();

     dbStore.put(getBytes("key1"), getBytes("value1"));
@@ -485,7 +491,6 @@
   @Test
   public void testBatchWrite() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
     File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
         + "-" + storeImpl.toLowerCase() + "-batchWrite");
@@ -493,6 +498,7 @@ public void testBatchWrite() throws IOException {
         .setConf(conf)
         .setCreateIfMissing(true)
         .setDbFile(dbDir)
+        .setDBType(storeImpl)
         .build();

     List expectedResult = Lists.newArrayList();
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java
index 610e898a2d70..8b3554a014cc 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java
@@ -53,9 +53,6 @@ public class TestRocksDBStoreMBean {
   @Before
   public void init() throws Exception {
     conf = new OzoneConfiguration();
-
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB);
   }

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
index a082e995afa1..b1ce4ba81cda 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
@@ -26,7 +26,6 @@ import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.UUID;
@@ -34,7 +33,6 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.scm.cli.SQLCLI;
@@ -51,13 +49,10 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;

 /**
  * This class tests the CLI that transforms om.db into SQLite DB files.
  */
-@RunWith(Parameterized.class)
 public class TestOmSQLCli {

   /**
@@ -82,21 +77,6 @@ public class TestOmSQLCli {
   private String keyName2 = "key2";
   private String keyName3 = "key3";

-  @Parameterized.Parameters
-  public static Collection data() {
-    return Arrays.asList(new Object[][] {
-        // Uncomment the below line if we support leveldb in future.
-        //{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
-        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
-    });
-  }
-
-  private String metaStoreType;
-
-  public TestOmSQLCli(String type) {
-    metaStoreType = type;
-  }
-
   /**
    * Create a MiniDFSCluster for testing.
    *
@@ -123,7 +103,6 @@ public void setup() throws Exception {
     cluster.getOzoneManager().stop();
     cluster.getStorageContainerManager().stop();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
     cli = new SQLCLI(conf);
   }

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
index 6c858abe7032..d3e228f09c84 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.TestStorageContainerManagerHelper;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -289,8 +288,6 @@ public void testSCMSafeMode() throws Exception {
   @Test(timeout = 300_000)
   public void testSCMSafeModeRestrictedOp() throws Exception {
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB);
     cluster.stop();
     cluster = builder.build();
     StorageContainerManager scm = cluster.getStorageContainerManager();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
index 360589688a60..704c18e18357 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.ozone.recon;

-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;

@@ -80,11 +78,6 @@ public final class ReconServerConfigKeys {
   public static final String RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT =
       "1m";

-  public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL =
-      "ozone.recon.container.db.impl";
-  public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL_DEFAULT =
-      OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
   public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL =
       "recon.om.snapshot.task.interval.delay";
   public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT