diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 4f14ede8fa52..a07ee5276ab9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -23,33 +23,28 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.ha.ConfUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.HATests; import org.apache.ratis.util.LifeCycle; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; import java.util.Optional; import java.util.OptionalInt; @@ -62,8 +57,9 @@ /** * Test client-side URI handling with Ozone Manager HA. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestOzoneFsHAURLs { +public abstract class TestOzoneFsHAURLs implements HATests.TestCase { /** * Set a timeout for each test. @@ -72,10 +68,9 @@ public class TestOzoneFsHAURLs { TestOzoneFsHAURLs.class); private OzoneConfiguration conf; - private static MiniOzoneHAClusterImpl cluster; - private static String omServiceId; - private static OzoneManager om; - private static int numOfOMs; + private MiniOzoneHAClusterImpl cluster; + private String omServiceId; + private OzoneManager om; private String volumeName; private String bucketName; @@ -85,7 +80,7 @@ public class TestOzoneFsHAURLs { "fs." + OzoneConsts.OZONE_URI_SCHEME + ".impl"; private static final String O3FS_IMPL_VALUE = "org.apache.hadoop.fs.ozone.OzoneFileSystem"; - private static OzoneClient client; + private OzoneClient client; private static final String OFS_IMPL_KEY = "fs." 
+ OzoneConsts.OZONE_OFS_URI_SCHEME + ".impl"; @@ -95,25 +90,10 @@ public class TestOzoneFsHAURLs { @BeforeAll - static void initClass(@TempDir File tempDir) throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - omServiceId = "om-service-test1"; - numOfOMs = 3; - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getAbsolutePath()); - conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3); - - conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, - BucketLayout.LEGACY.name()); - conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - - // Start the cluster - MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); - builder.setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(5); - cluster = builder.build(); - cluster.waitForClusterToBeReady(); - client = OzoneClientFactory.getRpcClient(omServiceId, conf); + void initClass() throws Exception { + cluster = cluster(); + omServiceId = cluster.getOzoneManager().getOMServiceId(); + client = cluster.newClient(); om = cluster.getOzoneManager(); } @@ -149,11 +129,8 @@ public void init() throws Exception { } @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java index bce962518735..9d47d7f41fa1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java @@ -29,25 +29,18 @@ import org.apache.hadoop.fs.contract.AbstractContractSeekTest; import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; +import org.apache.ozone.test.ClusterForTests; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import java.io.IOException; -import java.time.Duration; import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.assertj.core.api.Assumptions.assumeThat; @@ -62,67 +55,28 @@ * but can tweak configuration by also overriding {@link #createOzoneConfig()}. 
*/ @TestInstance(TestInstance.Lifecycle.PER_CLASS) -abstract class AbstractOzoneContractTest { +abstract class AbstractOzoneContractTest extends ClusterForTests { private static final String CONTRACT_XML = "contract/ozone.xml"; - private MiniOzoneCluster cluster; - /** * This must be implemented by all subclasses. * @return the FS contract */ abstract AbstractFSContract createOzoneContract(Configuration conf); - /** - * Creates the base configuration for contract tests. This can be tweaked - * in subclasses by overriding {@link #createOzoneConfig()}. - */ - protected static OzoneConfiguration createBaseConfiguration() { - OzoneConfiguration conf = new OzoneConfiguration(); - DatanodeRatisServerConfig ratisServerConfig = - conf.getObject(DatanodeRatisServerConfig.class); - ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); - ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); - conf.setFromObject(ratisServerConfig); - - RatisClientConfig.RaftConfig raftClientConfig = - conf.getObject(RatisClientConfig.RaftConfig.class); - raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); - raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); - conf.setFromObject(raftClientConfig); - + @Override + protected OzoneConfiguration createOzoneConfig() { + OzoneConfiguration conf = createBaseConfiguration(); conf.addResource(CONTRACT_XML); - - conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); - conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); - return conf; } - /** - * Hook method that allows tweaking the configuration. - */ - OzoneConfiguration createOzoneConfig() { - return createBaseConfiguration(); - } - - MiniOzoneCluster getCluster() { - return cluster; - } - - @BeforeAll - void setup() throws Exception { - cluster = MiniOzoneCluster.newBuilder(createOzoneConfig()) + @Override + protected MiniOzoneCluster createCluster() throws Exception { + return MiniOzoneCluster.newBuilder(createOzoneConfig()) .setNumDatanodes(5) .build(); - cluster.waitForClusterToBeReady(); - } - - @AfterAll - void teardown() { - IOUtils.closeQuietly(cluster); } @Nested diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java index b45e68d85eb2..ab893ef5779a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java @@ -30,8 +30,8 @@ class TestOzoneContractFSO extends AbstractOzoneContractTest { @Override - OzoneConfiguration createOzoneConfig() { - OzoneConfiguration conf = createBaseConfiguration(); + protected OzoneConfiguration createOzoneConfig() { + OzoneConfiguration conf = super.createOzoneConfig(); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, FILE_SYSTEM_OPTIMIZED.name()); return conf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java index 97ced88fcde8..c23cebd41bbc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java @@ 
-30,8 +30,8 @@ class TestOzoneContractLegacy extends AbstractOzoneContractTest { @Override - OzoneConfiguration createOzoneConfig() { - OzoneConfiguration conf = createBaseConfiguration(); + protected OzoneConfiguration createOzoneConfig() { + OzoneConfiguration conf = super.createOzoneConfig(); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, LEGACY.name()); return conf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java index 2b64d397eae0..94a5ed7c242d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java @@ -21,43 +21,37 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; /** * Test allocate container calls. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestAllocateContainer { +public abstract class TestAllocateContainer implements NonHATests.TestCase { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerLocationProtocolClientSideTranslatorPB + private OzoneConfiguration conf; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); + void init() throws Exception { + conf = cluster().getConf(); storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - xceiverClientManager = new XceiverClientManager(conf); + cluster().getStorageContainerLocationClient(); } @AfterAll - public static void shutdown() throws InterruptedException { - if (cluster != null) { - cluster.shutdown(); - } + void cleanup() { IOUtils.cleanupWithLogger(null, storageContainerLocationClient); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java index 43df6bf051da..85105809068d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java @@ -21,32 +21,27 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestInstance; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.io.IOException; import java.util.HashMap; import java.util.Set; -import java.util.concurrent.TimeUnit; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -55,40 +50,23 @@ /** * This class tests container report with DN container state info. */ -@Timeout(value = 300, unit = TimeUnit.SECONDS) -public class TestContainerReportWithKeys { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@Timeout(300) +public abstract class TestContainerReportWithKeys implements NonHATests.TestCase { private static final Logger LOG = LoggerFactory.getLogger( TestContainerReportWithKeys.class); - private static MiniOzoneCluster cluster = null; - private static OzoneClient client; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; + private OzoneClient client; + private StorageContainerManager scm; - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - client = OzoneClientFactory.getRpcClient(conf); - scm = cluster.getStorageContainerManager(); + void init() throws Exception { + client = cluster().newClient(); + scm = cluster().getStorageContainerManager(); } - /** - * Shutdown MiniDFSCluster. - */ @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } @Test @@ -121,7 +99,7 @@ public void testContainerReportKeyWrite() throws Exception { OmKeyLocationInfo keyInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() + cluster().getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); @@ -137,13 +115,4 @@ public void testContainerReportKeyWrite() throws Exception { cinfo.getNumberOfKeys(), cinfo.getUsedBytes()); } - - private static ContainerData getContainerData(long containerID) { - ContainerData containerData; - ContainerSet containerManager = cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContainer().getContainerSet(); - containerData = - containerManager.getContainer(containerID).getContainerData(); - return containerData; - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java index 87728f6ce101..24accb66fe26 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java @@ -18,24 +18,24 @@ package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.ozone.test.NonHATests; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import static java.nio.charset.StandardCharsets.UTF_8; @@ -47,33 +47,25 @@ /** * Test Container calls. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestContainerSmallFile { +public abstract class TestContainerSmallFile implements NonHATests.TestCase { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConfig; - private static StorageContainerLocationProtocolClientSideTranslatorPB + private OzoneConfiguration ozoneConfig; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; + private XceiverClientManager xceiverClientManager; @BeforeAll - public static void init() throws Exception { - ozoneConfig = new OzoneConfiguration(); - ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, PlacementPolicy.class); - cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(3) - .build(); - cluster.waitForClusterToBeReady(); - storageContainerLocationClient = cluster + void init() throws Exception { + ozoneConfig = cluster().getConf(); + storageContainerLocationClient = cluster() .getStorageContainerLocationClient(); xceiverClientManager = new XceiverClientManager(ozoneConfig); } @AfterAll - public static void shutdown() throws InterruptedException { - if (cluster != null) { - cluster.shutdown(); - } + void cleanup() { IOUtils.cleanupWithLogger(null, storageContainerLocationClient); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java index 43fc45efd098..0003c5f9039a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java @@ -26,18 +26,18 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,34 +53,26 @@ /** * Test Container calls. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestGetCommittedBlockLengthAndPutKey { +public abstract class TestGetCommittedBlockLengthAndPutKey implements NonHATests.TestCase { private static final Logger LOG = LoggerFactory.getLogger(TestGetCommittedBlockLengthAndPutKey.class); - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConfig; - private static StorageContainerLocationProtocolClientSideTranslatorPB + private OzoneConfiguration ozoneConfig; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; + private XceiverClientManager xceiverClientManager; @BeforeAll - public static void init() throws Exception { - ozoneConfig = new OzoneConfiguration(); - ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, PlacementPolicy.class); - cluster = - MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); + void init() throws Exception { + ozoneConfig = cluster().getConf(); storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); + cluster().getStorageContainerLocationClient(); xceiverClientManager = new XceiverClientManager(ozoneConfig); } @AfterAll - public static void shutdown() throws InterruptedException { - if (cluster != null) { - cluster.shutdown(); - } + void cleanup() { IOUtils.cleanupWithLogger(null, storageContainerLocationClient); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java index dcc9b3e8e37b..88475583a09d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java @@ -18,28 +18,24 @@ package org.apache.hadoop.hdds.scm; +import org.apache.ozone.test.NonHATests; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import javax.management.MBeanServer; import javax.management.ObjectName; import javax.management.openmbean.CompositeData; import javax.management.openmbean.TabularData; -import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.HashMap; import java.util.Iterator; import java.util.Map; -import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -48,36 +44,20 @@ /** * Class which tests the SCMNodeManagerInfo Bean. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestSCMNodeManagerMXBean { +public abstract class TestSCMNodeManagerMXBean implements NonHATests.TestCase { public static final Logger LOG = LoggerFactory.getLogger(TestSCMMXBean.class); - private static int numOfDatanodes = 3; - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; - private static MBeanServer mbs; + private StorageContainerManager scm; + private MBeanServer mbs; @BeforeAll - public static void init() throws IOException, TimeoutException, - InterruptedException { - conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_STALENODE_INTERVAL, "60000ms"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numOfDatanodes) - .build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); + void init() { + scm = cluster().getStorageContainerManager(); mbs = ManagementFactory.getPlatformMBeanServer(); } - @AfterAll - public static void cleanup() { - if (cluster != null) { - cluster.shutdown(); - } - } - @Test public void testDiskUsage() throws Exception { ObjectName bean = new ObjectName( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java index 4a9efceeb7b8..d78a472f8500 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java @@ -18,10 +18,8 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterEach; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -30,36 +28,26 @@ import javax.management.ObjectName; import javax.management.openmbean.CompositeData; import javax.management.openmbean.TabularData; -import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.Map; -import java.util.concurrent.TimeoutException; import static org.junit.jupiter.api.Assertions.assertInstanceOf; - /** * Test cases to verify the metrics exposed by SCMPipelineManager via MXBean. */ @Timeout(3000) -public class TestPipelineManagerMXBean { +public abstract class TestPipelineManagerMXBean implements NonHATests.TestCase { - private MiniOzoneCluster cluster; private MBeanServer mbs; @BeforeEach - public void init() - throws IOException, TimeoutException, InterruptedException { - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); + void init() { mbs = ManagementFactory.getPlatformMBeanServer(); } /** * Verifies SCMPipelineManagerInfo metrics. 
- * - * @throws Exception */ @Test public void testPipelineInfo() throws Exception { @@ -68,7 +56,7 @@ public void testPipelineInfo() throws Exception { GenericTestUtils.waitFor(() -> { try { - Map pipelineStateCount = cluster + Map pipelineStateCount = cluster() .getStorageContainerManager().getPipelineManager().getPipelineInfo(); final TabularData data = (TabularData) mbs.getAttribute( bean, "PipelineInfo"); @@ -95,9 +83,4 @@ private Integer getMetricsCount(TabularData data, String state) { } return null; } - - @AfterEach - public void teardown() { - cluster.shutdown(); - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java index e49a378a15cf..7d8641c05f32 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java @@ -22,13 +22,11 @@ import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; -import java.util.concurrent.TimeoutException; import okhttp3.OkHttpClient; import okhttp3.Request; import okhttp3.Response; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.jupiter.api.BeforeAll; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.Test; /** @@ -37,25 +35,15 @@ *
jvm_metrics_cpu_system_load * jvm_metrics_cpu_jvm_load
*/ -public class TestCpuMetrics { +public abstract class TestCpuMetrics implements NonHATests.TestCase { - private static MiniOzoneCluster cluster; private final OkHttpClient httpClient = new OkHttpClient(); - @BeforeAll - public static void setup() throws InterruptedException, TimeoutException, - IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - } - @Test public void testCpuMetrics() throws IOException { // given String scmHttpServerUrl = "http://localhost:" + - HddsUtils.getPortNumberFromConfigKeys(cluster.getConf(), + HddsUtils.getPortNumberFromConfigKeys(cluster().getConf(), OZONE_SCM_HTTP_ADDRESS_KEY).getAsInt(); Request prometheusMetricsRequest = new Request.Builder() .url(scmHttpServerUrl + "/prom") diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java index 9becc8b2591c..b0c81b54a930 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java @@ -20,9 +20,10 @@ import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; +import org.apache.ozone.test.HATests; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -31,7 +32,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import java.io.IOException; -import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -41,34 +41,19 @@ * This class is to test the serialization/deserialization of cluster tree * information from SCM. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestGetClusterTreeInformation { +public abstract class TestGetClusterTreeInformation implements HATests.TestCase { public static final Logger LOG = LoggerFactory.getLogger(TestGetClusterTreeInformation.class); - private static int numOfDatanodes = 3; - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; + private OzoneConfiguration conf; + private StorageContainerManager scm; @BeforeAll - public static void init() throws IOException, TimeoutException, - InterruptedException { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newHABuilder(conf) - .setNumOfOzoneManagers(3) - .setNumOfStorageContainerManagers(3) - .setNumDatanodes(numOfDatanodes) - .build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); - } - - @AfterAll - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } + void init() { + conf = cluster().getConf(); + scm = cluster().getStorageContainerManager(); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java index 2f18326f7b1b..544b49a8c74a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java @@ -17,19 +17,12 @@ package org.apache.hadoop.ozone.container.metrics; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics; -import org.junit.jupiter.api.BeforeEach; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; import static org.apache.commons.text.WordUtils.capitalize; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.COMMAND_DISPATCHER_QUEUE_PREFIX; @@ -41,44 +34,9 @@ /** * Test for queue metrics of datanodes. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestDatanodeQueueMetrics { - - private MiniOzoneHAClusterImpl cluster = null; - private OzoneConfiguration conf; - private String omServiceId; - private static int numOfOMs = 3; - private String scmServiceId; - private static int numOfSCMs = 3; - - private static final Logger LOG = LoggerFactory - .getLogger(TestDatanodeQueueMetrics.class); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeEach - public void init() throws Exception { - conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - omServiceId = "om-service-test1"; - scmServiceId = "scm-service-test1"; - MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); - builder.setOMServiceId(omServiceId) - .setSCMServiceId(scmServiceId) - .setNumOfStorageContainerManagers(numOfSCMs) - .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(1); - cluster = builder.build(); - cluster.waitForClusterToBeReady(); - } - /** - * Set a timeout for each test. - */ +public abstract class TestDatanodeQueueMetrics implements NonHATests.TestCase { @Test public void testQueueMetrics() { @@ -89,7 +47,6 @@ public void testQueueMetrics() { assertThat(getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX + typeSize)) .isGreaterThanOrEqualTo(0); } - } private long getGauge(String metricName) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java index 33d59f101ebc..e16139c63b8e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java @@ -18,27 +18,24 @@ package org.apache.hadoop.ozone.freon; -import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientCreator; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.junit.jupiter.api.AfterAll; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import picocli.CommandLine; -import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -49,32 +46,16 @@ /** * Tests Freon, with MiniOzoneCluster and validate data. 
*/ -public class TestDNRPCLoadGenerator { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class TestDNRPCLoadGenerator implements NonHATests.TestCase { - private static MiniOzoneCluster cluster = null; - private static ContainerWithPipeline container; - - private static void startCluster(OzoneConfiguration conf) throws Exception { - DatanodeRatisServerConfig ratisServerConfig = - conf.getObject(DatanodeRatisServerConfig.class); - ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); - ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); - conf.setFromObject(ratisServerConfig); - - RatisClientConfig.RaftConfig raftClientConfig = - conf.getObject(RatisClientConfig.RaftConfig.class); - raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); - raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); - conf.setFromObject(raftClientConfig); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5).build(); - cluster.waitForClusterToBeReady(); - cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, - 180000); + private ContainerWithPipeline container; + @BeforeAll + void init() throws Exception { + OzoneConfiguration conf = cluster().getConf(); StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient = cluster + storageContainerLocationClient = cluster() .getStorageContainerLocationClient(); container = storageContainerLocationClient.allocateContainer( @@ -87,23 +68,6 @@ private static void startCluster(OzoneConfiguration conf) throws Exception { } } - static void shutdownCluster() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @BeforeAll - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - startCluster(conf); - } - - @AfterAll - public static void shutdown() { - shutdownCluster(); - } - private static Stream provideParameters() { return Stream.of( Arguments.of(true, true), @@ -117,7 +81,7 @@ private static Stream provideParameters() { @MethodSource("provideParameters") public void test(boolean readOnly, boolean ratis) { DNRPCLoadGenerator randomKeyGenerator = - new DNRPCLoadGenerator(cluster.getConf()); + new DNRPCLoadGenerator(cluster().getConf()); CommandLine cmd = new CommandLine(randomKeyGenerator); List cmdArgs = new ArrayList<>(Arrays.asList( "--container-id", Long.toString(container.getContainerInfo().getContainerID()), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java index 5997d5758a28..82e6405135f1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java @@ -18,7 +18,6 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -26,9 +25,11 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import java.io.IOException; @@ -40,35 +41,21 @@ /** * Tests to verify Object store without prefix enabled. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(1200) -public class TestObjectStore { - private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static OzoneClient client; +public abstract class TestObjectStore implements NonHATests.TestCase { + private OzoneConfiguration conf; + private OzoneClient client; - /** - * Create a MiniOzoneCluster for testing. - *
- * - * @throws IOException - */ @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); + void init() throws Exception { + conf = cluster().getConf(); + client = cluster().newClient(); } - /** - * Shutdown MiniOzoneCluster. - */ @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java index 5e3a3aa1980f..87b1a735266e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java @@ -48,10 +48,12 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.util.StringUtils; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import java.io.IOException; @@ -83,30 +85,22 @@ /** * Tests to verify Object store with prefix enabled cases. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(1200) -public class TestObjectStoreWithFSO { +public abstract class TestObjectStoreWithFSO implements NonHATests.TestCase { private static final Path ROOT = new Path(OZONE_URI_DELIMITER); - private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static String volumeName; - private static String bucketName; - private static FileSystem fs; - private static OzoneClient client; + private MiniOzoneCluster cluster; + private OzoneConfiguration conf; + private String volumeName; + private String bucketName; + private FileSystem fs; + private OzoneClient client; - /** - * Create a MiniDFSCluster for testing. - *
- * - * @throws IOException - */ @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, - BucketLayout.FILE_SYSTEM_OPTIMIZED.name()); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); + void init() throws Exception { + conf = new OzoneConfiguration(cluster().getConf()); + cluster = cluster(); client = cluster.newClient(); // create a volume and a bucket to be used by OzoneFileSystem OzoneBucket bucket = TestDataUtil @@ -147,7 +141,7 @@ protected void deleteRootDir() throws IOException { } } - private static void deleteRootRecursively(FileStatus[] fileStatuses) + private void deleteRootRecursively(FileStatus[] fileStatuses) throws IOException { for (FileStatus fStatus : fileStatuses) { fs.delete(fStatus.getPath(), true); @@ -829,14 +823,8 @@ public BucketLayout getBucketLayout() { return BucketLayout.FILE_SYSTEM_OPTIMIZED; } - /** - * Shutdown MiniDFSCluster. - */ @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 247f7e751038..03eba1e5ef76 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -16,15 +16,12 @@ */ package org.apache.hadoop.ozone.om; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; @@ -40,50 +37,35 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; /** * This class tests the versioning of blocks from OM side. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestOmBlockVersioning { - - private static MiniOzoneCluster cluster = null; - private static OzoneClient client; - private static OzoneConfiguration conf; - private static OzoneManager ozoneManager; - private static OzoneManagerProtocol writeClient; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ +public abstract class TestOmBlockVersioning implements NonHATests.TestCase { + + private OzoneClient client; + private OzoneManager ozoneManager; + private OzoneManagerProtocol writeClient; + @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); - ozoneManager = cluster.getOzoneManager(); + void init() throws Exception { + client = cluster().newClient(); + ozoneManager = cluster().getOzoneManager(); writeClient = client.getObjectStore() .getClientProxy().getOzoneManagerClient(); } - /** - * Shutdown MiniDFSCluster. - */ @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java index 2cca0619afe1..b7486aec200e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java @@ -38,9 +38,10 @@ import java.util.List; import java.util.Map; -import org.junit.jupiter.api.AfterAll; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -51,24 +52,17 @@ /** * This class is to test the REST interface exposed by OzoneManager. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestOzoneManagerRestInterface { +public abstract class TestOzoneManagerRestInterface implements NonHATests.TestCase { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; + private MiniOzoneCluster cluster; + private OzoneConfiguration conf; @BeforeAll - public static void setUp() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - } - - @AfterAll - public static void tearDown() throws Exception { - if (cluster != null) { - cluster.shutdown(); - } + void setup() { + conf = cluster().getConf(); + cluster = cluster(); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 8691e5ede383..90aebee5e375 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -19,47 +19,26 @@ import java.net.InetSocketAddress; -import org.apache.hadoop.ozone.admin.OzoneAdmin; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.jupiter.api.AfterAll; +import org.apache.hadoop.ozone.admin.OzoneAdmin; +import org.apache.ozone.test.HATests; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; /** * This class tests ozone admin scm commands. */ -public class TestScmAdminHA { - private static OzoneAdmin ozoneAdmin; - private static OzoneConfiguration conf; - private static String omServiceId; - private static int numOfOMs; - private static MiniOzoneCluster cluster; +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class TestScmAdminHA implements HATests.TestCase { + + private OzoneAdmin ozoneAdmin; + private MiniOzoneCluster cluster; @BeforeAll - public static void init() throws Exception { + void init() { ozoneAdmin = new OzoneAdmin(); - conf = new OzoneConfiguration(); - - // Init HA cluster - omServiceId = "om-service-test1"; - numOfOMs = 3; - cluster = MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - conf.setQuietMode(false); - // enable ratis for Scm. - conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); - cluster.waitForClusterToBeReady(); - } - - @AfterAll - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } + cluster = cluster(); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/ClusterForTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/ClusterForTests.java new file mode 100644 index 000000000000..c09bff04c563 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/ClusterForTests.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestInstance; + +import java.time.Duration; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; + +/** + * Base class for Ozone integration tests. Manages lifecycle of {@link MiniOzoneCluster}. + *
+ * Subclasses can tweak configuration by overriding {@link #createOzoneConfig()}. + */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class ClusterForTests { + + private C cluster; + + /** + * Creates the base configuration for tests. This can be tweaked + * in subclasses by overriding {@link #createOzoneConfig()}. + */ + protected static OzoneConfiguration createBaseConfiguration() { + OzoneConfiguration conf = new OzoneConfiguration(); + DatanodeRatisServerConfig ratisServerConfig = + conf.getObject(DatanodeRatisServerConfig.class); + ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); + ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); + conf.setFromObject(ratisServerConfig); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + + conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); + conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); + + return conf; + } + + /** + * Hook method that allows tweaking the configuration. + */ + protected OzoneConfiguration createOzoneConfig() { + return createBaseConfiguration(); + } + + /** + * Hook method to create cluster with different parameters. + */ + protected abstract C createCluster() throws Exception; + + protected C getCluster() { + return cluster; + } + + @BeforeAll + void startCluster() throws Exception { + cluster = createCluster(); + cluster.waitForClusterToBeReady(); + } + + @AfterAll + void shutdownCluster() { + IOUtils.closeQuietly(cluster); + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java new file mode 100644 index 000000000000..f01a9ed3a04f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.TestInstance; + +import java.util.UUID; + +/** + * Group tests to be run with a single HA cluster. + *

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java
new file mode 100644
index 000000000000..f01a9ed3a04f
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.test;
+
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.TestInstance;
+
+import java.util.UUID;
+
+/**
+ * Groups tests to be run with a single HA cluster.
+ * <p>
+ * Specific tests are implemented in separate classes, and they are subclassed
+ * here as {@link Nested} inner classes.  This allows running all tests in the
+ * same cluster.
+ */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+public abstract class HATests extends ClusterForTests<MiniOzoneHAClusterImpl> {
+
+  /** Hook method for subclasses. */
+  MiniOzoneHAClusterImpl.Builder newClusterBuilder() {
+    return MiniOzoneCluster.newHABuilder(createOzoneConfig())
+        .setOMServiceId("om-" + UUID.randomUUID())
+        .setNumOfOzoneManagers(3)
+        .setSCMServiceId("scm-" + UUID.randomUUID())
+        .setNumOfStorageContainerManagers(3);
+  }
+
+  /** Test cases which need an HA cluster should implement this. */
+  public interface TestCase {
+    MiniOzoneHAClusterImpl cluster();
+  }
+
+  @Nested
+  class OzoneFsHAURLs extends org.apache.hadoop.fs.ozone.TestOzoneFsHAURLs {
+    @Override
+    public MiniOzoneHAClusterImpl cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class GetClusterTreeInformation extends org.apache.hadoop.ozone.TestGetClusterTreeInformation {
+    @Override
+    public MiniOzoneHAClusterImpl cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class DatanodeQueueMetrics extends org.apache.hadoop.ozone.container.metrics.TestDatanodeQueueMetrics {
+    @Override
+    public MiniOzoneHAClusterImpl cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class ScmAdminHA extends org.apache.hadoop.ozone.shell.TestScmAdminHA {
+    @Override
+    public MiniOzoneHAClusterImpl cluster() {
+      return getCluster();
+    }
+  }
+
+}
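For context, a hedged sketch of the contract a converted HA test follows; the class name and the specific assertion below are illustrative, not taken from this patch.

// Illustrative only: a test case wired into the shared HA cluster via
// HATests.TestCase.  A matching @Nested subclass would be added to HATests.
package org.apache.hadoop.ozone;

import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
import org.apache.ozone.test.HATests;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;

import static org.junit.jupiter.api.Assertions.assertNotNull;

@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public abstract class TestExampleHAFeature implements HATests.TestCase {

  @Test
  void sharedClusterIsAvailable() {
    // cluster() returns the MiniOzoneHAClusterImpl started once by HATests.
    MiniOzoneHAClusterImpl cluster = cluster();
    assertNotNull(cluster.getOzoneManager().getOMServiceId());
  }
}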

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java
new file mode 100644
index 000000000000..8e0e32868815
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.test;
+
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.TestInstance;
+
+/**
+ * Groups tests to be run with a single non-HA cluster.
+ * <p>
+ * Specific tests are implemented in separate classes, and they are subclassed
+ * here as {@link Nested} inner classes.  This allows running all tests in the
+ * same cluster.
+ */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+public abstract class NonHATests extends ClusterForTests<MiniOzoneCluster> {
+
+  /** Hook method for subclasses. */
+  MiniOzoneCluster.Builder newClusterBuilder() {
+    return MiniOzoneCluster.newBuilder(createOzoneConfig())
+        .setNumDatanodes(5);
+  }
+
+  /** Test cases for a non-HA cluster should implement this. */
+  public interface TestCase {
+    MiniOzoneCluster cluster();
+  }
+
+  @Nested
+  class AllocateContainer extends org.apache.hadoop.hdds.scm.TestAllocateContainer {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class ContainerReportWithKeys extends org.apache.hadoop.hdds.scm.TestContainerReportWithKeys {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class ContainerSmallFile extends org.apache.hadoop.hdds.scm.TestContainerSmallFile {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class GetCommittedBlockLengthAndPutKey extends org.apache.hadoop.hdds.scm.TestGetCommittedBlockLengthAndPutKey {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class SCMNodeManagerMXBean extends org.apache.hadoop.hdds.scm.TestSCMNodeManagerMXBean {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class PipelineManagerMXBean extends org.apache.hadoop.hdds.scm.pipeline.TestPipelineManagerMXBean {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class CpuMetrics extends org.apache.hadoop.ozone.TestCpuMetrics {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class DNRPCLoadGenerator extends org.apache.hadoop.ozone.freon.TestDNRPCLoadGenerator {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class ObjectStore extends org.apache.hadoop.ozone.om.TestObjectStore {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class ObjectStoreWithFSO extends org.apache.hadoop.ozone.om.TestObjectStoreWithFSO {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class OmBlockVersioning extends org.apache.hadoop.ozone.om.TestOmBlockVersioning {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+  @Nested
+  class OzoneManagerRestInterface extends org.apache.hadoop.ozone.om.TestOzoneManagerRestInterface {
+    @Override
+    public MiniOzoneCluster cluster() {
+      return getCluster();
+    }
+  }
+
+}
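Likewise, a sketch (assumed names throughout, not part of this patch) of how a converted non-HA test obtains shared resources instead of building its own cluster.

// Illustrative only: a non-HA test case that borrows the shared cluster and
// creates a client per test class rather than a cluster per test class.
package org.apache.hadoop.ozone.om;

import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;

@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public abstract class TestExampleObjectStoreFeature implements NonHATests.TestCase {

  private OzoneClient client;

  @BeforeAll
  void setup() throws Exception {
    // cluster() is the MiniOzoneCluster managed by NonHATests/ClusterForTests.
    client = cluster().newClient();
  }

  @AfterAll
  void cleanup() {
    IOUtils.closeQuietly(client);
  }
}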
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationHA.java
new file mode 100644
index 000000000000..287328f67499
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationHA.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.test;
+
+import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+
+/** Test Ozone with an HA cluster. */
+public class TestOzoneIntegrationHA extends HATests {
+  @Override
+  protected MiniOzoneHAClusterImpl createCluster() throws Exception {
+    return newClusterBuilder()
+        .build();
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationNonHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationNonHA.java
new file mode 100644
index 000000000000..793580b17d01
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationNonHA.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.test;
+
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.junit.jupiter.api.TestInstance;
+
+/** Test Ozone with a non-HA cluster. */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+public class TestOzoneIntegrationNonHA extends NonHATests {
+
+  @Override
+  protected MiniOzoneCluster createCluster() throws Exception {
+    return newClusterBuilder()
+        .build();
+  }
+
+}
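One possible follow-up, not in this patch: additional concrete entry points could reuse the same grouped tests against differently built clusters. The class name and datanode count below are assumptions.

// Illustrative only: a second non-HA entry point running the same test group
// on a smaller cluster.
package org.apache.ozone.test;

import org.apache.hadoop.ozone.MiniOzoneCluster;

public class TestOzoneIntegrationNonHASmall extends NonHATests {
  @Override
  protected MiniOzoneCluster createCluster() throws Exception {
    return newClusterBuilder()
        .setNumDatanodes(3)
        .build();
  }
}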
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/package-info.java
new file mode 100644
index 000000000000..2309ac44d874
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Contains test cluster definitions.
+ */
+package org.apache.ozone.test;