From b227dfb0ccaa65bf15705e6df348a5fc966aa711 Mon Sep 17 00:00:00 2001 From: wangzhaohui Date: Thu, 21 Dec 2023 14:25:25 +0800 Subject: [PATCH 1/7] HDDS-9955. Simplify assertions in integration tests --- .../fs/ozone/TestOzoneFileInterfaces.java | 24 ++++++++----------- .../TestRootedOzoneFileSystemWithFSO.java | 2 +- .../scm/pipeline/TestLeaderChoosePolicy.java | 4 ++-- .../hdds/scm/pipeline/TestPipelineClose.java | 5 ++-- .../rpc/TestOzoneRpcClientAbstract.java | 12 +++++----- .../rpc/TestOzoneRpcClientForAclAuditLog.java | 3 ++- .../ozone/om/TestOmBlockVersioning.java | 3 +-- .../om/TestOzoneManagerListVolumesSecure.java | 2 +- .../hadoop/ozone/om/TestScmSafeMode.java | 2 +- .../ozone/om/TestSnapshotDeletingService.java | 2 +- .../scm/TestContainerReportWithKeys.java | 5 ++-- .../ozone/scm/TestFailoverWithSCMHA.java | 4 ++-- .../hadoop/ozone/scm/TestSCMMXBean.java | 6 ++--- .../scm/TestStorageContainerManager.java | 2 +- .../ozone/scm/TestXceiverClientManager.java | 6 ++--- .../scm/pipeline/TestSCMPipelineMetrics.java | 2 +- 16 files changed, 40 insertions(+), 44 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index c735e312aac9..ed5eab9aecae 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -72,6 +72,8 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import org.junit.Before; import org.junit.Rule; @@ -259,12 +261,9 @@ public void testOzFsReadWrite() throws IOException { o3fs.pathToKey(path)); // verify prefix directories and the file, do not already exist - assertTrue( - metadataManager.getKeyTable(getBucketLayout()).get(lev1key) == null); - assertTrue( - metadataManager.getKeyTable(getBucketLayout()).get(lev2key) == null); - assertTrue( - metadataManager.getKeyTable(getBucketLayout()).get(fileKey) == null); + assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev1key)); + assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev2key)); + assertNull(metadataManager.getKeyTable(getBucketLayout()).get(fileKey)); try (FSDataOutputStream stream = fs.create(path)) { stream.writeBytes(data); @@ -376,19 +375,16 @@ public void testDirectory() throws IOException { o3fs.pathToKey(leaf)); // verify prefix directories and the leaf, do not already exist - assertTrue( - metadataManager.getKeyTable(getBucketLayout()).get(lev1key) == null); - assertTrue( - metadataManager.getKeyTable(getBucketLayout()).get(lev2key) == null); - assertTrue( - metadataManager.getKeyTable(getBucketLayout()).get(leafKey) == null); + assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev1key)); + assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev2key)); + assertNull(metadataManager.getKeyTable(getBucketLayout()).get(leafKey)); assertTrue("Makedirs returned with false for the path " + leaf, fs.mkdirs(leaf)); // verify the leaf directory got created. 
leafstatus = getDirectoryStat(leaf); - assertTrue(leafstatus != null); + assertNotNull(leafstatus); FileStatus lev1status; FileStatus lev2status; @@ -409,7 +405,7 @@ public void testDirectory() throws IOException { // check the root directory rootstatus = getDirectoryStat(createPath("/")); - assertTrue(rootstatus != null); + assertNotNull(rootstatus); // root directory listing should contain the lev1 prefix directory FileStatus[] statusList = fs.listStatus(createPath("/")); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java index 49da3e721884..73060701b75d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java @@ -201,7 +201,7 @@ public void testDeleteVolumeAndBucket() throws IOException { assertTrue(getFs().delete(bucketPath2, true)); assertTrue(getFs().delete(volumePath1, false)); long deletes = getOMMetrics().getNumKeyDeletes(); - assertTrue(deletes == prevDeletes + 1); + assertEquals(prevDeletes + 1, deletes); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index a695038d4448..d4e30eb1b391 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -94,9 +94,9 @@ private void checkLeaderBalance(int dnNum, int leaderNumOfEachDn) leaderCount.put(leader, leaderCount.get(leader) + 1); } - assertTrue(leaderCount.size() == dnNum); + assertEquals(dnNum, leaderCount.size()); for (Map.Entry entry: leaderCount.entrySet()) { - assertTrue(leaderCount.get(entry.getKey()) == leaderNumOfEachDn); + assertEquals(leaderNumOfEachDn, leaderCount.get(entry.getKey())); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index 99dd1d1768d8..6d523b21df04 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -63,6 +63,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Tests for Pipeline Closing.
@@ -229,7 +230,7 @@ public void testPipelineCloseWithLogFailure() try { pipelineManager.getPipeline(openPipeline.getId()); } catch (PipelineNotFoundException e) { - assertTrue(false, "pipeline should exist"); + fail("pipeline should exist"); } DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0); @@ -275,6 +276,6 @@ private boolean verifyCloseForPipeline(Pipeline pipeline, } assertTrue(found, "SCM did not receive a Close action for the Pipeline"); - return found; + return true; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 1e5dc26d8419..0923f7e476ed 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -634,7 +634,7 @@ public void testCreateBucketWithVersioning() volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); assertEquals(bucketName, bucket.getName()); - assertEquals(true, bucket.getVersioning()); + assertTrue(bucket.getVersioning()); } @Test @@ -708,7 +708,7 @@ public void testCreateBucketWithAllArgument() volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); assertEquals(bucketName, bucket.getName()); - assertEquals(true, bucket.getVersioning()); + assertTrue(bucket.getVersioning()); assertEquals(StorageType.SSD, bucket.getStorageType()); assertTrue(bucket.getAcls().contains(userAcl)); assertEquals(repConfig, bucket.getReplicationConfig()); @@ -812,7 +812,7 @@ public void testSetBucketVersioning() bucket.setVersioning(true); OzoneBucket newBucket = volume.getBucket(bucketName); assertEquals(bucketName, newBucket.getName()); - assertEquals(true, newBucket.getVersioning()); + assertTrue(newBucket.getVersioning()); } @Test @@ -830,7 +830,7 @@ public void testAclsAfterCallingSetBucketProperty() throws Exception { OzoneBucket newBucket = volume.getBucket(bucketName); assertEquals(bucketName, newBucket.getName()); - assertEquals(true, newBucket.getVersioning()); + assertTrue(newBucket.getVersioning()); List aclsAfterSet = newBucket.getAcls(); assertEquals(currentAcls, aclsAfterSet); @@ -3796,7 +3796,7 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, assertTrue(latestVersionLocations.isMultipartKey()); latestVersionLocations.getBlocksLatestVersionOnly() .forEach(omKeyLocationInfo -> - assertTrue(omKeyLocationInfo.getPartNumber() != -1)); + assertNotEquals(-1, omKeyLocationInfo.getPartNumber())); } private String initiateMultipartUpload(OzoneBucket bucket, String keyName, @@ -3996,7 +3996,7 @@ public void testDeletedKeyForGDPR() throws Exception { assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG)); assertEquals("AES", key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM)); - assertTrue(key.getMetadata().get(OzoneConsts.GDPR_SECRET) != null); + assertNotNull(key.getMetadata().get(OzoneConsts.GDPR_SECRET)); try (OzoneInputStream is = bucket.readKey(keyName)) { assertInputStreamContent(text, is); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java index
3f7c590bf6ef..d2ace27dc346 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java @@ -64,6 +64,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -287,7 +288,7 @@ private void verifyLog(String... expected) throws Exception { try { // When log entry is expected, the log file will contain one line and // that must be equal to the expected string - assertTrue(lines.size() != 0); + assertNotEquals(0, lines.size()); for (String exp: expected) { assertTrue(lines.get(0).contains(exp)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 428bfa730591..93bea2b3328d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -39,7 +39,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -144,7 +143,7 @@ public void testAllocateCommit() throws Exception { List locationInfoList = openKey.getKeyInfo().getLatestVersionLocations() .getBlocksLatestVersionOnly(); - assertTrue(locationInfoList.size() == 1); + assertEquals(1, locationInfoList.size()); locationInfoList.add(locationInfo); keyArgs.setLocationInfoList(locationInfoList); writeClient.commitKey(keyArgs, openKey.getId()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java index 977bb0d4e9b9..1c751bc99a59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java @@ -294,7 +294,7 @@ private static void doAs(UserGroupInformation ugi, Callable callable) { // Some thread (eg: HeartbeatEndpointTask) will use the login ugi, // so we could not use loginUserFromKeytabAndReturnUGI to switch user.
- assertEquals(true, ugi.doAs((PrivilegedAction) () -> { + assertTrue(ugi.doAs((PrivilegedAction) () -> { try { return callable.call(); } catch (Throwable ex) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 1d3aaf351fc7..d682c7f8f31c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -272,7 +272,7 @@ public void testSCMSafeMode() throws Exception { scm = cluster.getStorageContainerManager(); assertTrue(scm.isInSafeMode()); assertFalse(logCapturer.getOutput().contains("SCM exiting safe mode.")); - assertTrue(scm.getCurrentContainerThreshold() == 0); + assertEquals(0, scm.getCurrentContainerThreshold()); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { dn.start(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index 98c23b8076f8..a14255d3c15e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -407,7 +407,7 @@ public void testSnapshotWithFSO() throws Exception { RepeatedOmKeyInfo activeDBDeleted = next.getValue(); OMMetadataManager metadataManager = cluster.getOzoneManager().getMetadataManager(); - assertEquals(activeDBDeleted.getOmKeyInfoList().size(), 1); + assertEquals(1, activeDBDeleted.getOmKeyInfoList().size()); OmKeyInfo activeDbDeletedKeyInfo = activeDBDeleted.getOmKeyInfoList().get(0); long volumeId = metadataManager diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java index bdbe4107fbe1..a30e3db2218e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java @@ -140,9 +140,8 @@ public void testContainerReportKeyWrite() throws Exception { Set replicas = scm.getContainerManager().getContainerReplicas( ContainerID.valueOf(keyInfo.getContainerID())); - Assert.assertTrue(replicas.size() == 1); - replicas.stream().forEach(rp -> - Assert.assertTrue(rp.getDatanodeDetails().getParent() != null)); + Assert.assertEquals(1, replicas.size()); + replicas.stream().forEach(rp -> Assert.assertNotNull(rp.getDatanodeDetails().getParent())); LOG.info("SCM Container Info keyCount: {} usedBytes: {}", cinfo.getNumberOfKeys(), cinfo.getUsedBytes()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java index e1d1ba31d74e..43a2e2603e92 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java @@ -115,7 +115,7 @@ public void testFailover() throws Exception { scmClientConfig.setRetryCount(1); 
scmClientConfig.setRetryInterval(100); scmClientConfig.setMaxRetryTimeout(1500); - assertEquals(scmClientConfig.getRetryCount(), 15); + assertEquals(15, scmClientConfig.getRetryCount()); conf.setFromObject(scmClientConfig); StorageContainerManager scm = getLeader(cluster); assertNotNull(scm); @@ -161,7 +161,7 @@ public void testMoveFailover() throws Exception { scmClientConfig.setRetryCount(1); scmClientConfig.setRetryInterval(100); scmClientConfig.setMaxRetryTimeout(1500); - assertEquals(scmClientConfig.getRetryCount(), 15); + assertEquals(15, scmClientConfig.getRetryCount()); conf.setFromObject(scmClientConfig); StorageContainerManager scm = getLeader(cluster); assertNotNull(scm); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java index a3314f59e7db..94019ed1d629 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java @@ -159,12 +159,12 @@ public void testSCMContainerStateCount() throws Exception { containerStateCount.forEach((k, v) -> { if (k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) { - assertEquals((int)v, 5); + assertEquals(5, (int)v); } else if (k.equals(HddsProtos.LifeCycleState.CLOSED.toString())) { - assertEquals((int)v, 5); + assertEquals(5, (int)v); } else { // Remaining all container state count should be zero. - assertEquals((int)v, 0); + assertEquals(0, (int)v); } }); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java index 6916e8cfb841..9286cfe521d6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java @@ -1073,7 +1073,7 @@ public void testIncrementalContainerReportQueue() throws Exception { eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); - Assert.assertTrue(containerReportExecutors.droppedEvents() == 0); + Assert.assertEquals(0, containerReportExecutors.droppedEvents()); Thread.currentThread().sleep(3000); Assert.assertEquals(containerReportExecutors.scheduledEvents(), containerReportExecutors.queuedEvents()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java index 9130a87b1a51..ca85c5cf9e3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java @@ -117,7 +117,7 @@ public void testCaching(boolean securityEnabled) throws IOException { clientManager.releaseClient(client1, true); clientManager.releaseClient(client2, true); clientManager.releaseClient(client3, true); - Assertions.assertTrue(clientManager.getClientCache().size() == 0); + Assertions.assertEquals(0, clientManager.getClientCache().size()); } } @@ -159,7 +159,7 @@ public void 
testFreeByReference() throws IOException { XceiverClientSpi nonExistent1 = cache.getIfPresent( container1.getContainerInfo().getPipelineID().getId().toString() + container1.getContainerInfo().getReplicationType()); - Assertions.assertEquals(null, nonExistent1); + Assertions.assertNull(nonExistent1); // However container call should succeed because of refcount on the client ContainerProtocolCalls.createContainer(client1, container1.getContainerInfo().getContainerID(), null); @@ -218,7 +218,7 @@ public void testFreeByEviction() throws IOException { XceiverClientSpi nonExistent = cache.getIfPresent( container1.getContainerInfo().getPipelineID().getId().toString() + container1.getContainerInfo().getReplicationType()); - Assertions.assertEquals(null, nonExistent); + Assertions.assertNull(nonExistent); // Any container operation should now fail Throwable t = Assertions.assertThrows(IOException.class, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java index 53f4ce5e16ae..568d9679d17d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java @@ -109,7 +109,7 @@ public void testNumBlocksAllocated() throws IOException, TimeoutException { Pipeline pipeline = block.getPipeline(); long numBlocksAllocated = getLongCounter( SCMPipelineMetrics.getBlockAllocationMetricName(pipeline), metrics); - Assertions.assertEquals(numBlocksAllocated, 1); + Assertions.assertEquals(1, numBlocksAllocated); // destroy the pipeline Assertions.assertDoesNotThrow(() -> From 6fcdf4468297d8b77b69c2242d0fd9db6e67e489 Mon Sep 17 00:00:00 2001 From: wangzhaohui Date: Thu, 21 Dec 2023 19:25:43 +0800 Subject: [PATCH 2/7] HDDS-9978. 
Improve assertTrue assertions in OM integration tests --- .../ozone/om/TestAddRemoveOzoneManager.java | 45 +++++++++--------- .../hadoop/ozone/om/TestKeyManagerImpl.java | 22 ++++----- .../hadoop/ozone/om/TestKeyPurging.java | 4 +- .../hadoop/ozone/om/TestListStatus.java | 4 +- .../ozone/om/TestOMDbCheckpointServlet.java | 33 +++++++------ .../hadoop/ozone/om/TestOMRatisSnapshots.java | 47 +++++++++---------- .../apache/hadoop/ozone/om/TestOmAcls.java | 30 ++++++------ .../apache/hadoop/ozone/om/TestOmMetrics.java | 6 +-- .../om/TestOmSnapshotDisabledRestart.java | 4 +- .../ozone/om/TestOmSnapshotFileSystem.java | 11 +++-- .../om/TestOzoneManagerConfiguration.java | 7 ++- .../om/TestOzoneManagerHAWithAllRunning.java | 29 +++++------- .../TestOzoneManagerHAWithStoppedNodes.java | 9 ++-- .../om/TestOzoneManagerRocksDBLogging.java | 4 +- .../hadoop/ozone/om/TestScmSafeMode.java | 13 ++--- .../om/multitenant/TestMultiTenantVolume.java | 7 +-- .../om/service/TestRangerBGSyncService.java | 22 ++++----- .../ozone/om/snapshot/TestOmSnapshot.java | 9 ++-- 18 files changed, 153 insertions(+), 153 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java index d438ad09fc31..f7d7650d0a94 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCM_DUMMY_SERVICE_ID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.om.TestOzoneManagerHA.createKey; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -131,10 +132,10 @@ private void assertNewOMExistsInPeerList(String nodeId) throws Exception { + " not present in Peer list of OM " + om.getOMNodeId()); assertTrue(om.getOmRatisServer().doesPeerExist(nodeId), "New OM node " + nodeId + " not present in Peer list of OM " + om.getOMNodeId() + " RatisServer"); - assertTrue( - om.getOmRatisServer().getCurrentPeersFromRaftConf().contains(nodeId), - "New OM node " + nodeId + " not present in " + "OM " - + om.getOMNodeId() + "RatisServer's RaftConf"); + assertThat( + om.getOmRatisServer().getCurrentPeersFromRaftConf()) + .withFailMessage("New OM node " + nodeId + " not present in " + "OM " + + om.getOMNodeId() + "RatisServer's RaftConf").contains(nodeId); } OzoneManager newOM = cluster.getOzoneManager(nodeId); @@ -144,7 +145,8 @@ private void assertNewOMExistsInPeerList(String nodeId) throws Exception { // Check Ratis Dir for log files File[] logFiles = getRatisLogFiles(newOM); - assertTrue(logFiles.length > 0, "There are no ratis logs in new OM "); + assertThat(logFiles.length).withFailMessage("There are no ratis logs in new OM ") + .isGreaterThan(0); } private File[] getRatisLogFiles(OzoneManager om) { @@ -197,9 +199,8 @@ public void testBootstrap() throws Exception { GenericTestUtils.waitFor(() -> cluster.getOMLeader() != null, 500, 30000); OzoneManager omLeader = cluster.getOMLeader(); - assertTrue(newOMNodeIds.contains(omLeader.getOMNodeId()), - "New Bootstrapped OM not elected Leader 
even though" + - " other OMs are down"); + assertThat(newOMNodeIds).withFailMessage("New Bootstrapped OM not elected Leader even though" + + " other OMs are down").contains(omLeader.getOMNodeId()); // Perform some read and write operations with new OM leader IOUtils.closeQuietly(client); @@ -247,10 +248,10 @@ public void testBootstrapWithoutConfigUpdate() throws Exception { Lists.newArrayList(existingOM.getNodeDetails())) + " do not have or" + " have incorrect information of the bootstrapping OM. Update their " + "ozone-site.xml before proceeding.", e.getMessage()); - assertTrue(omLog.getOutput().contains("Remote OM config check " + - "failed on OM " + existingOMNodeId)); - assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId + - " - System Exit")); + assertThat(omLog.getOutput()).contains("Remote OM config check " + + "failed on OM " + existingOMNodeId); + assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId + + " - System Exit"); } /*************************************************************************** @@ -268,11 +269,11 @@ public void testBootstrapWithoutConfigUpdate() throws Exception { try { cluster.bootstrapOzoneManager(newNodeId, false, true); } catch (IOException e) { - assertTrue(omLog.getOutput().contains("Couldn't add OM " + - newNodeId + " to peer list.")); - assertTrue(miniOzoneClusterLog.getOutput().contains( + assertThat(omLog.getOutput()).contains("Couldn't add OM " + + newNodeId + " to peer list."); + assertThat(miniOzoneClusterLog.getOutput()).contains( existingOMNodeId + " - System Exit: There is no OM configuration " + - "for node ID " + newNodeId + " in ozone-site.xml.")); + "for node ID " + newNodeId + " in ozone-site.xml."); // Verify that the existing OM has stopped. assertFalse(cluster.getOzoneManager(existingOMNodeId).isRunning()); @@ -321,12 +322,12 @@ public void testForceBootstrap() throws Exception { Lists.newArrayList(downOM.getNodeDetails())) + " do not have or " + "have incorrect information of the bootstrapping OM. 
Update their " + "ozone-site.xml before proceeding.", e.getMessage()); - assertTrue(omLog.getOutput().contains("Remote OM " + downOMNodeId + - " configuration returned null")); - assertTrue(omLog.getOutput().contains("Remote OM config check " + - "failed on OM " + downOMNodeId)); - assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId + - " - System Exit")); + assertThat(omLog.getOutput()).contains("Remote OM " + downOMNodeId + + " configuration returned null"); + assertThat(omLog.getOutput()).contains("Remote OM config check " + + "failed on OM " + downOMNodeId); + assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId + + " - System Exit"); } /*************************************************************************** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index b878f920193f..cba6f49c72a5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -127,6 +127,7 @@ import org.mockito.Mockito; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -317,8 +318,8 @@ public void allocateBlockFailureInSafeMode() throws Exception { OMException omException = assertThrows(OMException.class, () -> writeClient.allocateBlock(keyArgs, 1L, new ExcludeList())); - assertTrue(omException.getMessage() - .contains("SafeModePrecheck failed for allocateBlock")); + assertThat(omException.getMessage()) + .contains("SafeModePrecheck failed for allocateBlock"); } @Test @@ -334,8 +335,8 @@ public void openKeyFailureInSafeMode() throws Exception { .build(); OMException omException = assertThrows(OMException.class, () -> writeClient.openKey(keyArgs)); - assertTrue(omException.getMessage() - .contains("SafeModePrecheck failed for allocateBlock")); + assertThat(omException.getMessage()) + .contains("SafeModePrecheck failed for allocateBlock"); } @Test @@ -847,9 +848,8 @@ public void testLookupKeyWithLocation() throws IOException { // lookup key, random node as client OmKeyInfo key4 = keyManager.lookupKey(keyArgs, resolvedBucket(), "/d=default-drack/127.0.0.1"); - assertTrue( - keyPipeline.getNodes().containsAll(key4.getLatestVersionLocations() - .getLocationList().get(0).getPipeline().getNodesInOrder())); + assertThat(keyPipeline.getNodes()).containsAll(key4.getLatestVersionLocations() + .getLocationList().get(0).getPipeline().getNodesInOrder()); } @NotNull @@ -1592,11 +1592,11 @@ private void verifyFileStatus(String directory, } // verify filestatus is present in directory or file set accordingly if (fileStatus.isDirectory()) { - assertTrue(directorySet.contains(normalizedKeyName), - directorySet + " doesn't contain " + normalizedKeyName); + assertThat(directorySet).withFailMessage(directorySet + + " doesn't contain " + normalizedKeyName).contains(normalizedKeyName); } else { - assertTrue(fileSet.contains(normalizedKeyName), - fileSet + " doesn't contain " + normalizedKeyName); + assertThat(fileSet).withFailMessage(fileSet + " doesn't contain " + normalizedKeyName) + .contains(normalizedKeyName); } } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index 066ff6e1db14..83eac0ab288b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -45,7 +45,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; @@ -134,7 +134,7 @@ public void testKeysPurgingByKeyDeletingService() throws Exception { () -> keyDeletingService.getDeletedKeyCount().get() >= NUM_KEYS, 1000, 10000); - assertTrue(keyDeletingService.getRunCount().get() > 1); + assertThat(keyDeletingService.getRunCount().get()).isGreaterThan(1); GenericTestUtils.waitFor( () -> { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java index 13e44402363e..a24e78617f72 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java @@ -35,8 +35,8 @@ import java.io.IOException; import java.util.UUID; import java.util.List; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.apache.hadoop.ozone.OzoneConfigKeys. OZONE_FS_ITERATE_BATCH_SIZE; @@ -198,7 +198,7 @@ private void checkKeyList(String keyPrefix, String startKey, OzoneFileStatus stNext = statuses.get(i + 1); System.out.println("status:" + stCurr); - assertTrue(stCurr.getPath().compareTo(stNext.getPath()) < 0); + assertThat(stCurr.getPath().compareTo(stNext.getPath())).isLessThan(0); } if (!statuses.isEmpty()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index a835944eefe1..fba6a723638a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -105,6 +105,7 @@ import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.truncateFileName; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COMPACTION_LOG_FILE_NAME_SUFFIX; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.params.provider.Arguments.arguments; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -275,15 +276,13 @@ public void testEndpoint(String httpMethod) throws Exception { doEndpoint(); - Assertions.assertTrue(tempFile.length() > 0); - Assertions.assertTrue( - omMetrics.getDBCheckpointMetrics(). 
- getLastCheckpointCreationTimeTaken() > 0); - Assertions.assertTrue( - omMetrics.getDBCheckpointMetrics(). - getLastCheckpointStreamingTimeTaken() > 0); - Assertions.assertTrue(omMetrics.getDBCheckpointMetrics(). - getNumCheckpoints() > initialCheckpointCount); + assertThat(tempFile.length()).isGreaterThan(0); + assertThat(omMetrics.getDBCheckpointMetrics().getLastCheckpointCreationTimeTaken()) + .isGreaterThan(0); + assertThat(omMetrics.getDBCheckpointMetrics().getLastCheckpointStreamingTimeTaken()) + .isGreaterThan(0); + assertThat(omMetrics.getDBCheckpointMetrics().getNumCheckpoints()) + .isGreaterThan(initialCheckpointCount); Mockito.verify(omDbCheckpointServletMock).writeDbDataToStream(any(), any(), any(), eq(toExcludeList), any(), any()); @@ -383,7 +382,7 @@ public void testSpnegoEnabled(String httpMethod) throws Exception { // Recon user should be able to access the servlet and download the // snapshot - Assertions.assertTrue(tempFile.length() > 0); + assertThat(tempFile.length()).isGreaterThan(0); } @Test @@ -483,8 +482,8 @@ public void testWriteDbDataToStream() throws Exception { Set finalCheckpointSet = getFiles(finalCheckpointLocation, newDbDirLength); - Assertions.assertTrue(finalCheckpointSet.contains(OM_HARDLINK_FILE), - "hardlink file exists in checkpoint dir"); + assertThat(finalCheckpointSet).withFailMessage("hardlink file exists in checkpoint dir") + .contains(OM_HARDLINK_FILE); finalCheckpointSet.remove(OM_HARDLINK_FILE); Assertions.assertEquals(initialCheckpointSet, finalCheckpointSet); @@ -522,10 +521,10 @@ public void testWriteDbDataToStream() throws Exception { Set initialFullSet = getFiles(Paths.get(metaDir.toString(), OM_SNAPSHOT_DIR), metaDirLength); - Assertions.assertTrue(finalFullSet.contains(expectedLogStr)); - Assertions.assertTrue(finalFullSet.contains(expectedSstStr)); - Assertions.assertTrue(initialFullSet.contains(unExpectedLogStr)); - Assertions.assertTrue(initialFullSet.contains(unExpectedSstStr)); + assertThat(finalFullSet).contains(expectedLogStr); + assertThat(finalFullSet).contains(expectedSstStr); + assertThat(initialFullSet).contains(unExpectedLogStr); + assertThat(initialFullSet).contains(unExpectedSstStr); // Remove the dummy files that should not have been copied over // from the expected data. 
@@ -628,7 +627,7 @@ public void testWriteDbDataWithToExcludeFileList() testDirLength); initialCheckpointSet.removeAll(finalCheckpointSet); - Assertions.assertTrue(initialCheckpointSet.contains(dummyFile.getName())); + assertThat(initialCheckpointSet).contains(dummyFile.getName()); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index 093f1107b5fa..920c182c5843 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -94,6 +94,7 @@ import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithStoppedNodes.createKey; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -262,17 +263,16 @@ void testInstallSnapshot(int numSnapshotsToCreate, @TempDir Path tempDir) throws long followerOMLastAppliedIndex = followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex(); - assertTrue( - followerOMLastAppliedIndex >= leaderOMSnapshotIndex - 1); + assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex - 1); // After the new checkpoint is installed, the follower OM // lastAppliedIndex must >= the snapshot index of the checkpoint. It // could be great than snapshot index if there is any conf entry from ratis. followerOMLastAppliedIndex = followerOM.getOmRatisServer() .getLastAppliedTermIndex().getIndex(); - assertTrue(followerOMLastAppliedIndex >= leaderOMSnapshotIndex); - assertTrue(followerOM.getOmRatisServer().getLastAppliedTermIndex() - .getTerm() >= leaderOMSnapshotTermIndex); + assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex); + assertThat(followerOM.getOmRatisServer().getLastAppliedTermIndex() + .getTerm()).isGreaterThanOrEqualTo(leaderOMSnapshotTermIndex); // Verify checkpoint installation was happened. String msg = "Reloaded OM state"; @@ -317,7 +317,7 @@ void testInstallSnapshot(int numSnapshotsToCreate, @TempDir Path tempDir) throws sstFileUnion.addAll(sstFiles); } // Confirm that there were multiple tarballs. - assertTrue(sstSetList.size() > 1); + assertThat(sstSetList.size()).isGreaterThan(1); // Confirm that there was no overlap of sst files // between the individual tarballs. assertEquals(sstFileUnion.size(), sstFileCount); @@ -378,7 +378,8 @@ private void checkSnapshot(OzoneManager leaderOM, OzoneManager followerOM, } } } - Assertions.assertTrue(hardLinkCount > 0, "No hard links were found"); + assertThat(hardLinkCount).withFailMessage("No hard links were found") + .isGreaterThan(0); } @Test @@ -471,8 +472,7 @@ public void testInstallIncrementalSnapshot(@TempDir Path tempDir) // Verify the metrics recording the incremental checkpoint at leader side DBCheckpointMetrics dbMetrics = leaderOM.getMetrics(). 
getDBCheckpointMetrics(); - Assertions.assertTrue( - dbMetrics.getLastCheckpointStreamingNumSSTExcluded() > 0); + assertThat(dbMetrics.getLastCheckpointStreamingNumSSTExcluded()).isGreaterThan(0); assertEquals(2, dbMetrics.getNumIncrementalCheckpoints()); // Verify RPC server is running @@ -552,9 +552,8 @@ private IncrementData getNextIncrementalTarball( followerOM.getOmSnapshotProvider().getNumDownloaded() == expectedNumDownloads, 1000, 30_000); - assertTrue(followerOM.getOmRatisServer(). - getLastAppliedTermIndex().getIndex() - >= leaderOMSnapshotIndex - 1); + assertThat(followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex()) + .isGreaterThanOrEqualTo(leaderOMSnapshotIndex - 1); // Now confirm tarball is just incremental and contains no unexpected // files/links. @@ -567,7 +566,7 @@ private IncrementData getNextIncrementalTarball( // Confirm that none of the files in the tarball match one in the // candidate dir. - assertTrue(sstFiles.size() > 0); + assertThat(sstFiles.size()).isGreaterThan(0); for (String s: sstFiles) { File sstFile = Paths.get(followerCandidatePath.toString(), s).toFile(); assertFalse(sstFile.exists(), @@ -588,7 +587,7 @@ private IncrementData getNextIncrementalTarball( "Incremental checkpoint should not " + "duplicate existing links"); } - assertTrue(lineCount > 0); + assertThat(lineCount).isGreaterThan(0); } return id; } @@ -648,7 +647,7 @@ public void testInstallIncrementalSnapshotWithFailure() throws Exception { File followerCandidateDir = followerOM.getOmSnapshotProvider(). getCandidateDir(); List sstList = HAUtils.getExistingSstFiles(followerCandidateDir); - Assertions.assertTrue(sstList.size() > 0); + assertThat(sstList.size()).isGreaterThan(0); Collections.shuffle(sstList); List victimSstList = sstList.subList(0, sstList.size() / 3); for (String sst: victimSstList) { @@ -799,17 +798,16 @@ public void testInstallSnapshotWithClientWrite() throws Exception { long followerOMLastAppliedIndex = followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex(); - assertTrue( - followerOMLastAppliedIndex >= leaderOMSnapshotIndex - 1); + assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex - 1); // After the new checkpoint is installed, the follower OM // lastAppliedIndex must >= the snapshot index of the checkpoint. It // could be great than snapshot index if there is any conf entry from ratis. followerOMLastAppliedIndex = followerOM.getOmRatisServer() .getLastAppliedTermIndex().getIndex(); - assertTrue(followerOMLastAppliedIndex >= leaderOMSnapshotIndex); - assertTrue(followerOM.getOmRatisServer().getLastAppliedTermIndex() - .getTerm() >= leaderOMSnapshotTermIndex); + assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex); + assertThat(followerOM.getOmRatisServer().getLastAppliedTermIndex() + .getTerm()).isGreaterThanOrEqualTo(leaderOMSnapshotTermIndex); // Verify that the follower OM's DB contains the transactions which were // made while it was inactive. @@ -898,17 +896,16 @@ public void testInstallSnapshotWithClientRead() throws Exception { long followerOMLastAppliedIndex = followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex(); - assertTrue( - followerOMLastAppliedIndex >= leaderOMSnapshotIndex - 1); + assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex - 1); // After the new checkpoint is installed, the follower OM // lastAppliedIndex must >= the snapshot index of the checkpoint. 
It // could be great than snapshot index if there is any conf entry from ratis. followerOMLastAppliedIndex = followerOM.getOmRatisServer() .getLastAppliedTermIndex().getIndex(); - assertTrue(followerOMLastAppliedIndex >= leaderOMSnapshotIndex); - assertTrue(followerOM.getOmRatisServer().getLastAppliedTermIndex() - .getTerm() >= leaderOMSnapshotTermIndex); + assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex); + assertThat(followerOM.getOmRatisServer().getLastAppliedTermIndex() + .getTerm()).isGreaterThanOrEqualTo(leaderOMSnapshotTermIndex); // Verify that the follower OM's DB contains the transactions which were // made while it was inactive. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java index d0d2bf40efdd..02ad087965dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java @@ -48,9 +48,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.audit.AuditLogTestUtils.verifyAuditLog; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test for Ozone Manager ACLs. @@ -124,8 +124,8 @@ public void testCreateVolumePermissionDenied() throws Exception { () -> TestDataUtil.createVolumeAndBucket(client)); assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult()); - assertTrue(logCapturer.getOutput() - .contains("doesn't have CREATE permission to access volume")); + assertThat(logCapturer.getOutput()) + .contains("doesn't have CREATE permission to access volume"); verifyAuditLog(OMAction.CREATE_VOLUME, AuditEventStatus.FAILURE); } @@ -138,8 +138,8 @@ public void testReadVolumePermissionDenied() throws Exception { objectStore.getVolume(bucket.getVolumeName())); assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult()); - assertTrue(logCapturer.getOutput() - .contains("doesn't have READ permission to access volume")); + assertThat(logCapturer.getOutput()) + .contains("doesn't have READ permission to access volume"); verifyAuditLog(OMAction.READ_VOLUME, AuditEventStatus.FAILURE); } @@ -151,8 +151,8 @@ public void testCreateBucketPermissionDenied() throws Exception { () -> TestDataUtil.createVolumeAndBucket(client)); assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult()); - assertTrue(logCapturer.getOutput() - .contains("doesn't have CREATE permission to access bucket")); + assertThat(logCapturer.getOutput()) + .contains("doesn't have CREATE permission to access bucket"); verifyAuditLog(OMAction.CREATE_BUCKET, AuditEventStatus.FAILURE); } @@ -167,8 +167,8 @@ public void testReadBucketPermissionDenied() throws Exception { ); assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult()); - assertTrue(logCapturer.getOutput() - .contains("doesn't have READ permission to access bucket")); + assertThat(logCapturer.getOutput()) + .contains("doesn't have READ permission to access bucket"); verifyAuditLog(OMAction.READ_BUCKET, AuditEventStatus.FAILURE); } @@ -181,8 +181,8 @@ public void testCreateKeyPermissionDenied() throws Exception 
{ OMException exception = assertThrows(OMException.class, () -> TestDataUtil.createKey(bucket, "testKey", "testcontent")); assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult()); - assertTrue(logCapturer.getOutput().contains("doesn't have CREATE " + - "permission to access key")); + assertThat(logCapturer.getOutput()).contains("doesn't have CREATE " + + "permission to access key"); } @Test @@ -195,8 +195,8 @@ public void testReadKeyPermissionDenied() throws Exception { () -> TestDataUtil.getKey(bucket, "testKey")); assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult()); - assertTrue(logCapturer.getOutput().contains("doesn't have READ " + - "permission to access key")); + assertThat(logCapturer.getOutput()).contains("doesn't have READ " + + "permission to access key"); verifyAuditLog(OMAction.READ_KEY, AuditEventStatus.FAILURE); } @@ -209,8 +209,8 @@ public void testSetACLPermissionDenied() throws Exception { OMException exception = assertThrows(OMException.class, () -> bucket.setAcl(new ArrayList<>())); assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult()); - assertTrue(logCapturer.getOutput() - .contains("doesn't have WRITE_ACL permission to access bucket")); + assertThat(logCapturer.getOutput()) + .contains("doesn't have WRITE_ACL permission to access bucket"); verifyAuditLog(OMAction.SET_ACL, AuditEventStatus.FAILURE); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index d49f059a06ca..b88775280407 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -22,9 +22,9 @@ import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.eq; @@ -324,8 +324,8 @@ public void testKeyOps() throws Exception { writeClient.commitKey(keyArgs, keySession.getId()); } catch (Exception e) { //Expected Failure in preExecute due to not enough datanode - assertTrue(e.getMessage().contains("No enough datanodes to choose"), - e::getMessage); + assertThat(e.getMessage()).withFailMessage(e.getMessage()) + .contains("No enough datanodes to choose"); } omMetrics = getMetrics("OMMetrics"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java index fe407bf66f82..613ebc58b56b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java @@ -34,6 +34,8 @@ import java.util.UUID; +import static org.assertj.core.api.Assertions.assertThat; + /** * Integration test to verify that if snapshot feature is disabled, OM start up * will fail when 
there are still snapshots remaining. @@ -102,7 +104,7 @@ public void testSnapshotFeatureFlag() throws Exception { // Restart OM, expect OM start up failure RuntimeException rte = Assertions.assertThrows(RuntimeException.class, () -> cluster.restartOzoneManager(om, true)); - Assertions.assertTrue(rte.getMessage().contains("snapshots remaining")); + assertThat(rte.getMessage()).contains("snapshots remaining"); // Enable snapshot feature again om.getConfiguration().setBoolean( OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java index 7c7205b20b74..790399c3237f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java @@ -82,6 +82,7 @@ import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -424,10 +425,10 @@ public void testBlockSnapshotFSAccessAfterDeletion() throws Exception { final String errorMsg1 = "no longer active"; FileNotFoundException exception = assertThrows(FileNotFoundException.class, () -> o3fs.listStatus(snapshotRoot)); - assertTrue(exception.getMessage().contains(errorMsg1)); + assertThat(exception.getMessage()).contains(errorMsg1); exception = assertThrows(FileNotFoundException.class, () -> o3fs.listStatus(snapshotParent)); - assertTrue(exception.getMessage().contains(errorMsg1)); + assertThat(exception.getMessage()).contains(errorMsg1); // Note: Different error message due to inconsistent FNFE client-side // handling in BasicOzoneClientAdapterImpl#getFileStatus @@ -435,10 +436,10 @@ public void testBlockSnapshotFSAccessAfterDeletion() throws Exception { final String errorMsg2 = "No such file or directory"; exception = assertThrows(FileNotFoundException.class, () -> o3fs.getFileStatus(snapshotKey1)); - assertTrue(exception.getMessage().contains(errorMsg2)); + assertThat(exception.getMessage()).contains(errorMsg2); exception = assertThrows(FileNotFoundException.class, () -> o3fs.getFileStatus(snapshotKey2)); - assertTrue(exception.getMessage().contains(errorMsg2)); + assertThat(exception.getMessage()).contains(errorMsg2); } @Test @@ -718,7 +719,7 @@ public void testListStatusOnLargeDirectory() throws Exception { "Total directories listed do not match the existing directories"); for (int i = 0; i < numDirs; i++) { - assertTrue(paths.contains(fileStatuses[i].getPath().getName())); + assertThat(paths).contains(fileStatuses[i].getPath().getName()); } deleteSnapshot(snapshotName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java index 8f75e568057e..80ae54bcf47e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java @@ -44,6 +44,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -386,8 +387,7 @@ public void testNoOMNodes() throws Exception { fail("Should have failed to start the cluster!"); } catch (OzoneIllegalArgumentException e) { // Expect error message - assertTrue(e.getMessage().contains( - "List of OM Node ID's should be specified")); + assertThat(e.getMessage()).contains("List of OM Node ID's should be specified"); } } @@ -416,8 +416,7 @@ public void testNoOMAddrs() throws Exception { fail("Should have failed to start the cluster!"); } catch (OzoneIllegalArgumentException e) { // Expect error message - assertTrue(e.getMessage().contains( - "OM RPC Address should be set for all node")); + assertThat(e.getMessage()).contains("OM RPC Address should be set for all node"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java index 8d933912c550..7e0765ea56b9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java @@ -73,6 +73,7 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.apache.ratis.metrics.RatisMetrics.RATIS_APPLICATION_NAME_METRICS; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -439,7 +440,7 @@ public void testJMXMetrics() throws Exception { MBeanInfo mBeanInfo = mBeanServer.getMBeanInfo(oname); assertNotNull(mBeanInfo); Object flushCount = mBeanServer.getAttribute(oname, "Count"); - assertTrue((long) flushCount >= 0); + assertThat((long) flushCount).isGreaterThanOrEqualTo(0); } @Test @@ -500,8 +501,7 @@ public void testOMRetryCache() throws Exception { assertTrue(raftClientReply.isSuccess()); - assertTrue(logCapturer.getOutput().contains("created volume:" - + volumeName)); + assertThat(logCapturer.getOutput()).contains("created volume:" + volumeName); logCapturer.clearOutput(); @@ -546,9 +546,7 @@ public void testOMRetryCache() throws Exception { // As second time with same client id and call id, this request should // be executed by ratis server as we are sending this request after cache // expiry duration. - assertTrue(logCapturer.getOutput().contains( - "Volume creation failed")); - + assertThat(logCapturer.getOutput()).contains("Volume creation failed"); } @Test @@ -743,7 +741,7 @@ void testLinkBucketRemoveBucketAcl() throws Exception { OzoneObj srcObj = buildBucketObj(srcBucket); // As by default create will add some default acls in RpcClient. List acls = getObjectStore().getAcl(linkObj); - Assertions.assertTrue(acls.size() > 0); + assertThat(acls.size()).isGreaterThan(0); // Remove an existing acl. 
boolean removeAcl = getObjectStore().removeAcl(linkObj, acls.get(0)); Assertions.assertTrue(removeAcl); @@ -756,7 +754,7 @@ void testLinkBucketRemoveBucketAcl() throws Exception { OzoneObj srcObj2 = buildBucketObj(srcBucket2); // As by default create will add some default acls in RpcClient. List acls2 = getObjectStore().getAcl(srcObj2); - Assertions.assertTrue(acls2.size() > 0); + assertThat(acls2.size()).isGreaterThan(0); // Remove an existing acl. boolean removeAcl2 = getObjectStore().removeAcl(srcObj2, acls.get(0)); Assertions.assertTrue(removeAcl2); @@ -918,7 +916,7 @@ private void testSetAcl(String remoteUserName, OzoneObj ozoneObj, OzoneObj.ResourceType.PREFIX.name())) { List acls = objectStore.getAcl(ozoneObj); - Assertions.assertTrue(acls.size() > 0); + assertThat(acls.size()).isGreaterThan(0); } OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName, @@ -988,7 +986,7 @@ private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, // As by default create will add some default acls in RpcClient. List acls = objectStore.getAcl(ozoneObj); - Assertions.assertTrue(acls.size() > 0); + assertThat(acls.size()).isGreaterThan(0); // Remove an existing acl. boolean removeAcl = objectStore.removeAcl(ozoneObj, acls.get(0)); @@ -1066,10 +1064,9 @@ void testOMRatisSnapshot() throws Exception { long smLastAppliedIndex = ozoneManager.getOmRatisServer().getLastAppliedTermIndex().getIndex(); long ratisSnapshotIndex = ozoneManager.getRatisSnapshotIndex(); - assertTrue(smLastAppliedIndex >= ratisSnapshotIndex, - "LastAppliedIndex on OM State Machine (" - + smLastAppliedIndex + ") is less than the saved snapshot index(" - + ratisSnapshotIndex + ")."); + assertThat(smLastAppliedIndex).withFailMessage("LastAppliedIndex on OM State Machine (" + + smLastAppliedIndex + ") is less than the saved snapshot index(" + + ratisSnapshotIndex + ").").isGreaterThanOrEqualTo(ratisSnapshotIndex); // Add more transactions to Ratis to trigger another snapshot while (appliedLogIndex <= (smLastAppliedIndex + getSnapshotThreshold())) { @@ -1091,9 +1088,9 @@ void testOMRatisSnapshot() throws Exception { // The new snapshot index must be greater than the previous snapshot index long ratisSnapshotIndexNew = ozoneManager.getRatisSnapshotIndex(); - assertTrue(ratisSnapshotIndexNew > ratisSnapshotIndex, + assertThat(ratisSnapshotIndexNew).withFailMessage( "Latest snapshot index must be greater than previous " + - "snapshot indices"); + "snapshot indices").isGreaterThan(ratisSnapshotIndex); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index 1a65d5d0653f..3b90b1b74caf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -63,10 +63,10 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_DEFAULT; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static 
org.junit.jupiter.api.Assertions.assertTrue; /** * Ozone Manager HA tests that stop/restart one or more OM nodes. @@ -298,7 +298,7 @@ void testOMRestart() throws Exception { final long leaderOMSnaphsotIndex = leaderOM.getRatisSnapshotIndex(); // The stopped OM should be lagging behind the leader OM. - assertTrue(followerOM1LastAppliedIndex < leaderOMSnaphsotIndex); + assertThat(followerOM1LastAppliedIndex).isLessThan(leaderOMSnaphsotIndex); // Restart the stopped OM. followerOM1.restart(); @@ -317,8 +317,7 @@ void testOMRestart() throws Exception { final long followerOM1LastAppliedIndexNew = followerOM1.getOmRatisServer().getLastAppliedTermIndex().getIndex(); - assertTrue( - followerOM1LastAppliedIndexNew > leaderOMSnaphsotIndex); + assertThat(followerOM1LastAppliedIndexNew).isGreaterThan(leaderOMSnaphsotIndex); } @Test @@ -590,7 +589,7 @@ private void validateVolumesList(Set expectedVolumes, while (volumeIterator.hasNext()) { OzoneVolume next = volumeIterator.next(); - assertTrue(expectedVolumes.contains(next.getName())); + assertThat(expectedVolumes).contains(next.getName()); expectedCount++; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java index 90eec4429246..ce6eb11998a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java @@ -35,6 +35,8 @@ import org.junit.rules.Timeout; import org.apache.ozone.test.JUnit5AwareTimeout; +import static org.assertj.core.api.Assertions.assertThat; + /** * Test RocksDB logging for Ozone Manager. 
*/ @@ -75,7 +77,7 @@ public void testOMRocksDBLoggingEnabled() throws Exception { waitForRocksDbLog(); Assert.fail("Unexpected RocksDB log: " + logCapturer.getOutput()); } catch (TimeoutException ex) { - Assert.assertTrue(ex.getMessage().contains("Timed out")); + assertThat(ex.getMessage()).contains("Timed out"); } enableRocksDbLogging(true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index d682c7f8f31c..610b9c693c3d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -63,6 +63,7 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -173,8 +174,8 @@ public void testSafeModeOperations() throws Exception { IOException ioException = assertThrows(IOException.class, () -> bucket1.createKey(keyName, 1000, RATIS, ONE, new HashMap<>())); - assertTrue(ioException.getMessage() - .contains("SafeModePrecheck failed for allocateBlock")); + assertThat(ioException.getMessage()) + .contains("SafeModePrecheck failed for allocateBlock"); } /** @@ -286,8 +287,8 @@ public void testSCMSafeMode() throws Exception { double safeModeCutoff = conf .getDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT); - assertTrue(scm.getCurrentContainerThreshold() >= safeModeCutoff); - assertTrue(logCapturer.getOutput().contains("SCM exiting safe mode.")); + assertThat(scm.getCurrentContainerThreshold()).isGreaterThanOrEqualTo(safeModeCutoff); + assertThat(logCapturer.getOutput()).contains("SCM exiting safe mode."); assertFalse(scm.isInSafeMode()); } @@ -302,8 +303,8 @@ public void testSCMSafeModeRestrictedOp() throws Exception { () -> scm.getClientProtocolServer() .allocateContainer(ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "")); - assertTrue(scmException.getMessage() - .contains("SafeModePrecheck failed for allocateContainer")); + assertThat(scmException.getMessage()) + .contains("SafeModePrecheck failed for allocateContainer"); cluster.startHddsDatanodes(); cluster.waitForClusterToBeReady(); cluster.waitTobeOutOfSafeMode(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java index e311bc0b5e63..1cb436dcb38d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java @@ -49,6 +49,7 @@ import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MULTITENANCY_ENABLED; +import static 
org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -94,8 +95,8 @@ public static void shutdownClusterProvider() { private static void expectFailurePreFinalization(VoidCallable eval) { OMException omException = assertThrows(OMException.class, eval::call); - assertTrue(omException.getMessage() - .contains("cannot be invoked before finalization")); + assertThat(omException.getMessage()) + .contains("cannot be invoked before finalization"); } /** @@ -310,7 +311,7 @@ public void testRejectNonS3CompliantTenantIdCreationWithDefaultStrictS3True() OMException.class, () -> store.createTenant(tenantId)); - assertTrue(e.getMessage().contains("Invalid volume name: " + tenantId)); + assertThat(e.getMessage()).contains("Invalid volume name: " + tenantId); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java index 08358054fcca..c01d0c90446e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java @@ -76,9 +76,9 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_SERVICE; import static org.apache.hadoop.ozone.om.OMMultiTenantManager.OZONE_TENANT_RANGER_ROLE_DESCRIPTION; import static org.apache.hadoop.security.authentication.util.KerberosName.DEFAULT_MECHANISM; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -467,7 +467,7 @@ public void testRemovePolicyAndRole() throws Exception { final long rangerSvcVersionBefore = bgSync.getRangerOzoneServicePolicyVersion(); - assertTrue(rangerSvcVersionBefore >= startingRangerVersion); + assertThat(rangerSvcVersionBefore).isGreaterThanOrEqualTo(startingRangerVersion); // Note: DB Service Version will be -1 if the test starts with an empty DB final long dbSvcVersionBefore = bgSync.getOMDBRangerServiceVersion(); @@ -481,8 +481,8 @@ public void testRemovePolicyAndRole() throws Exception { final long rangerSvcVersionAfter = bgSync.getRangerOzoneServicePolicyVersion(); assertEquals(rangerSvcVersionAfter, dbSvcVersionAfter); - assertTrue(dbSvcVersionAfter > dbSvcVersionBefore); - assertTrue(rangerSvcVersionAfter > rangerSvcVersionBefore); + assertThat(dbSvcVersionAfter).isGreaterThan(dbSvcVersionBefore); + assertThat(rangerSvcVersionAfter).isGreaterThan(rangerSvcVersionBefore); // Verify that the Ranger policies and roles not backed up // by OzoneManager Multi-Tenancy tables are cleaned up by sync thread @@ -530,7 +530,7 @@ public void testConsistentState() throws Exception { createRolesAndPoliciesInRanger(true); long rangerSvcVersionBefore = bgSync.getRangerOzoneServicePolicyVersion(); - assertTrue(rangerSvcVersionBefore >= startingRangerVersion); + assertThat(rangerSvcVersionBefore).isGreaterThanOrEqualTo(startingRangerVersion); // Note: DB Service Version will be -1 if the test starts with an 
empty DB final long dbSvcVersionBefore = bgSync.getOMDBRangerServiceVersion(); @@ -583,7 +583,7 @@ public void testRecoverRangerRole() throws Exception { long rangerVersionAfterCreation = bgSync.getRangerOzoneServicePolicyVersion(); - assertTrue(rangerVersionAfterCreation >= startingRangerVersion); + assertThat(rangerVersionAfterCreation).isGreaterThanOrEqualTo(startingRangerVersion); // Delete user bob from user role, expect Ranger sync thread to update it String userRoleName = rolesCreated.get(0); @@ -617,8 +617,8 @@ public void testRecoverRangerRole() throws Exception { final long rangerSvcVersionAfter = bgSync.getRangerOzoneServicePolicyVersion(); assertEquals(rangerSvcVersionAfter, dbSvcVersionAfter); - assertTrue(dbSvcVersionAfter > dbSvcVersionBefore); - assertTrue(rangerSvcVersionAfter > rangerSvcVersionBefore); + assertThat(dbSvcVersionAfter).isGreaterThan(dbSvcVersionBefore); + assertThat(rangerSvcVersionAfter).isGreaterThan(rangerSvcVersionBefore); for (String policyName : policiesCreated) { final Policy policy = accessController.getPolicy(policyName); @@ -651,7 +651,7 @@ public void testRecreateDeletedRangerPolicy() throws Exception { long rangerVersionAfterCreation = bgSync.getRangerOzoneServicePolicyVersion(); - assertTrue(rangerVersionAfterCreation >= startingRangerVersion); + assertThat(rangerVersionAfterCreation).isGreaterThanOrEqualTo(startingRangerVersion); // Delete both policies, expect Ranger sync thread to recover both accessController.deletePolicy( @@ -673,8 +673,8 @@ public void testRecreateDeletedRangerPolicy() throws Exception { final long rangerSvcVersionAfter = bgSync.getRangerOzoneServicePolicyVersion(); assertEquals(rangerSvcVersionAfter, dbSvcVersionAfter); - assertTrue(dbSvcVersionAfter > dbSvcVersionBefore); - assertTrue(rangerSvcVersionAfter > rangerSvcVersionBefore); + assertThat(dbSvcVersionAfter).isGreaterThan(dbSvcVersionBefore); + assertThat(rangerSvcVersionAfter).isGreaterThan(rangerSvcVersionBefore); for (String policyName : policiesCreated) { try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 7e9fe787df67..dcd206f07b25 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -134,6 +134,7 @@ import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS; import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG; +import static org.assertj.core.api.Assertions.assertThat; import static org.awaitility.Awaitility.with; import static org.awaitility.Awaitility.await; import static org.hamcrest.MatcherAssert.assertThat; @@ -1392,10 +1393,10 @@ public void testSnapDiff() throws Exception { SnapshotDiffReportOzone diff3 = getSnapDiffReport(volume, bucket, snap3, snap4); assertEquals(1, diff3.getDiffList().size()); - assertTrue(diff3.getDiffList().contains( + assertThat(diff3.getDiffList()).contains( SnapshotDiffReportOzone.getDiffReportEntry( SnapshotDiffReportOzone.DiffType.RENAME, key2, - key2Renamed))); + key2Renamed)); // Create a directory @@ -1406,9 +1407,9 @@ public void testSnapDiff() throws Exception { SnapshotDiffReportOzone diff4 = getSnapDiffReport(volume, 
bucket, snap4, snap5); assertEquals(1, diff4.getDiffList().size()); - assertTrue(diff4.getDiffList().contains( + assertThat(diff4.getDiffList()).contains( SnapshotDiffReportOzone.getDiffReportEntry( - SnapshotDiffReportOzone.DiffType.CREATE, dir1))); + SnapshotDiffReportOzone.DiffType.CREATE, dir1)); String key3 = createFileKeyWithPrefix(bucket1, "key-3-"); String snap6 = "snap" + counter.incrementAndGet(); From 2fc8f58c4b5346f91deb1e4332a4c577e6de3da2 Mon Sep 17 00:00:00 2001 From: wangzhaohui Date: Fri, 22 Dec 2023 09:48:28 +0800 Subject: [PATCH 3/7] fix comments --- .../fs/ozone/TestOzoneFileInterfaces.java | 24 ++++++++------- .../ozone/om/TestAddRemoveOzoneManager.java | 12 ++++---- .../hadoop/ozone/om/TestKeyManagerImpl.java | 5 ++-- .../apache/hadoop/ozone/om/TestOmMetrics.java | 3 +- .../om/TestOzoneManagerConfiguration.java | 7 +++-- .../om/TestOzoneManagerHAWithAllRunning.java | 29 ++++++++++--------- 6 files changed, 44 insertions(+), 36 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index ed5eab9aecae..c735e312aac9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -72,8 +72,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.junit.Assume.assumeFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import org.junit.Before; import org.junit.Rule; @@ -261,9 +259,12 @@ public void testOzFsReadWrite() throws IOException { o3fs.pathToKey(path)); // verify prefix directories and the file, do not already exist - assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev1key)); - assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev2key)); - assertNull(metadataManager.getKeyTable(getBucketLayout()).get(fileKey)); + assertTrue( + metadataManager.getKeyTable(getBucketLayout()).get(lev1key) == null); + assertTrue( + metadataManager.getKeyTable(getBucketLayout()).get(lev2key) == null); + assertTrue( + metadataManager.getKeyTable(getBucketLayout()).get(fileKey) == null); try (FSDataOutputStream stream = fs.create(path)) { stream.writeBytes(data); @@ -375,16 +376,19 @@ public void testDirectory() throws IOException { o3fs.pathToKey(leaf)); // verify prefix directories and the leaf, do not already exist - assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev1key)); - assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev2key)); - assertNull(metadataManager.getKeyTable(getBucketLayout()).get(leafKey)); + assertTrue( + metadataManager.getKeyTable(getBucketLayout()).get(lev1key) == null); + assertTrue( + metadataManager.getKeyTable(getBucketLayout()).get(lev2key) == null); + assertTrue( + metadataManager.getKeyTable(getBucketLayout()).get(leafKey) == null); assertTrue("Makedirs returned with false for the path " + leaf, fs.mkdirs(leaf)); // verify the leaf directory got created. 
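The revert in this hunk appears to keep TestOzoneFileInterfaces on its existing org.junit.Assert imports rather than mixing in org.junit.jupiter.api.Assertions (the two Jupiter imports added in patch 1/7 are removed again). If staying on JUnit 4 in this file was the goal, the null checks could still be written without the instanceof-style comparison, since org.junit.Assert provides the same methods. A hedged sketch, assuming static imports of org.junit.Assert.assertNull and org.junit.Assert.assertNotNull:

    // JUnit 4 equivalents of the Jupiter calls reverted above:
    assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev1key));
    assertNotNull(leafstatus);
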
leafstatus = getDirectoryStat(leaf); - assertNotNull(leafstatus); + assertTrue(leafstatus != null); FileStatus lev1status; FileStatus lev2status; @@ -405,7 +409,7 @@ public void testDirectory() throws IOException { // check the root directory rootstatus = getDirectoryStat(createPath("/")); - assertNotNull(rootstatus); + assertTrue(rootstatus != null); // root directory listing should contain the lev1 prefix directory FileStatus[] statusList = fs.listStatus(createPath("/")); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java index f7d7650d0a94..632974475a72 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java @@ -132,10 +132,9 @@ private void assertNewOMExistsInPeerList(String nodeId) throws Exception { + " not present in Peer list of OM " + om.getOMNodeId()); assertTrue(om.getOmRatisServer().doesPeerExist(nodeId), "New OM node " + nodeId + " not present in Peer list of OM " + om.getOMNodeId() + " RatisServer"); - assertThat( - om.getOmRatisServer().getCurrentPeersFromRaftConf()) - .withFailMessage("New OM node " + nodeId + " not present in " + "OM " - + om.getOMNodeId() + "RatisServer's RaftConf").contains(nodeId); + assertThat(om.getOmRatisServer().getCurrentPeersFromRaftConf()) + .withFailMessage("New OM node " + nodeId + " not present in " + om.getOMNodeId() + "'s RaftConf") + .contains(nodeId); } OzoneManager newOM = cluster.getOzoneManager(nodeId); @@ -199,8 +198,9 @@ public void testBootstrap() throws Exception { GenericTestUtils.waitFor(() -> cluster.getOMLeader() != null, 500, 30000); OzoneManager omLeader = cluster.getOMLeader(); - assertThat(newOMNodeIds).withFailMessage("New Bootstrapped OM not elected Leader even though" + - " other OMs are down").contains(omLeader.getOMNodeId()); + assertThat(newOMNodeIds) + .withFailMessage("New Bootstrapped OM not elected Leader even though" + " other OMs are down") + .contains(omLeader.getOMNodeId()); // Perform some read and write operations with new OM leader IOUtils.closeQuietly(client); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index cba6f49c72a5..88821c6bdae9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -848,8 +848,9 @@ public void testLookupKeyWithLocation() throws IOException { // lookup key, random node as client OmKeyInfo key4 = keyManager.lookupKey(keyArgs, resolvedBucket(), "/d=default-drack/127.0.0.1"); - assertThat(keyPipeline.getNodes()).containsAll(key4.getLatestVersionLocations() - .getLocationList().get(0).getPipeline().getNodesInOrder()); + assertThat(keyPipeline.getNodes()) + .containsAll(key4.getLatestVersionLocations() + .getLocationList().get(0).getPipeline().getNodesInOrder()); } @NotNull diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index b88775280407..8ab1cf54a583 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -324,8 +324,7 @@ public void testKeyOps() throws Exception { writeClient.commitKey(keyArgs, keySession.getId()); } catch (Exception e) { //Expected Failure in preExecute due to not enough datanode - assertThat(e.getMessage()).withFailMessage(e.getMessage()) - .contains("No enough datanodes to choose"); + assertThat(e.getMessage()).contains("No enough datanodes to choose"); } omMetrics = getMetrics("OMMetrics"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java index 80ae54bcf47e..8f75e568057e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java @@ -44,7 +44,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -387,7 +386,8 @@ public void testNoOMNodes() throws Exception { fail("Should have failed to start the cluster!"); } catch (OzoneIllegalArgumentException e) { // Expect error message - assertThat(e.getMessage()).contains("List of OM Node ID's should be specified"); + assertTrue(e.getMessage().contains( + "List of OM Node ID's should be specified")); } } @@ -416,7 +416,8 @@ public void testNoOMAddrs() throws Exception { fail("Should have failed to start the cluster!"); } catch (OzoneIllegalArgumentException e) { // Expect error message - assertThat(e.getMessage()).contains("OM RPC Address should be set for all node"); + assertTrue(e.getMessage().contains( + "OM RPC Address should be set for all node")); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java index 7e0765ea56b9..8d933912c550 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java @@ -73,7 +73,6 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.apache.ratis.metrics.RatisMetrics.RATIS_APPLICATION_NAME_METRICS; -import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -440,7 +439,7 @@ public void testJMXMetrics() throws Exception { MBeanInfo mBeanInfo = mBeanServer.getMBeanInfo(oname); assertNotNull(mBeanInfo); Object flushCount = mBeanServer.getAttribute(oname, "Count"); - assertThat((long) flushCount).isGreaterThanOrEqualTo(0); + assertTrue((long) flushCount >= 0); } @Test @@ -501,7 +500,8 @@ public void testOMRetryCache() throws Exception { 
assertTrue(raftClientReply.isSuccess()); - assertThat(logCapturer.getOutput()).contains("created volume:" + volumeName); + assertTrue(logCapturer.getOutput().contains("created volume:" + + volumeName)); logCapturer.clearOutput(); @@ -546,7 +546,9 @@ public void testOMRetryCache() throws Exception { // As second time with same client id and call id, this request should // be executed by ratis server as we are sending this request after cache // expiry duration. - assertThat(logCapturer.getOutput()).contains("Volume creation failed"); + assertTrue(logCapturer.getOutput().contains( + "Volume creation failed")); + } @Test @@ -741,7 +743,7 @@ void testLinkBucketRemoveBucketAcl() throws Exception { OzoneObj srcObj = buildBucketObj(srcBucket); // As by default create will add some default acls in RpcClient. List acls = getObjectStore().getAcl(linkObj); - assertThat(acls.size()).isGreaterThan(0); + Assertions.assertTrue(acls.size() > 0); // Remove an existing acl. boolean removeAcl = getObjectStore().removeAcl(linkObj, acls.get(0)); Assertions.assertTrue(removeAcl); @@ -754,7 +756,7 @@ void testLinkBucketRemoveBucketAcl() throws Exception { OzoneObj srcObj2 = buildBucketObj(srcBucket2); // As by default create will add some default acls in RpcClient. List acls2 = getObjectStore().getAcl(srcObj2); - assertThat(acls2.size()).isGreaterThan(0); + Assertions.assertTrue(acls2.size() > 0); // Remove an existing acl. boolean removeAcl2 = getObjectStore().removeAcl(srcObj2, acls.get(0)); Assertions.assertTrue(removeAcl2); @@ -916,7 +918,7 @@ private void testSetAcl(String remoteUserName, OzoneObj ozoneObj, OzoneObj.ResourceType.PREFIX.name())) { List acls = objectStore.getAcl(ozoneObj); - assertThat(acls.size()).isGreaterThan(0); + Assertions.assertTrue(acls.size() > 0); } OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName, @@ -986,7 +988,7 @@ private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, // As by default create will add some default acls in RpcClient. List acls = objectStore.getAcl(ozoneObj); - assertThat(acls.size()).isGreaterThan(0); + Assertions.assertTrue(acls.size() > 0); // Remove an existing acl. 
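For the log-capture checks reverted above, the practical difference between the two styles is what the test prints when it fails: assertTrue(output.contains(...)) reports only that the boolean was false, while AssertJ's string assertion includes both the expected substring and the actual text it searched. A small sketch, assuming output holds logCapturer.getOutput() and that assertTrue and assertThat are statically imported as elsewhere in the file:

    assertTrue(output.contains("Volume creation failed"));   // failure only says the boolean was false
    assertThat(output).contains("Volume creation failed");    // failure quotes the substring and the captured output
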
boolean removeAcl = objectStore.removeAcl(ozoneObj, acls.get(0)); @@ -1064,9 +1066,10 @@ void testOMRatisSnapshot() throws Exception { long smLastAppliedIndex = ozoneManager.getOmRatisServer().getLastAppliedTermIndex().getIndex(); long ratisSnapshotIndex = ozoneManager.getRatisSnapshotIndex(); - assertThat(smLastAppliedIndex).withFailMessage("LastAppliedIndex on OM State Machine (" - + smLastAppliedIndex + ") is less than the saved snapshot index(" - + ratisSnapshotIndex + ").").isGreaterThanOrEqualTo(ratisSnapshotIndex); + assertTrue(smLastAppliedIndex >= ratisSnapshotIndex, + "LastAppliedIndex on OM State Machine (" + + smLastAppliedIndex + ") is less than the saved snapshot index(" + + ratisSnapshotIndex + ")."); // Add more transactions to Ratis to trigger another snapshot while (appliedLogIndex <= (smLastAppliedIndex + getSnapshotThreshold())) { @@ -1088,9 +1091,9 @@ void testOMRatisSnapshot() throws Exception { // The new snapshot index must be greater than the previous snapshot index long ratisSnapshotIndexNew = ozoneManager.getRatisSnapshotIndex(); - assertThat(ratisSnapshotIndexNew).withFailMessage( + assertTrue(ratisSnapshotIndexNew > ratisSnapshotIndex, "Latest snapshot index must be greater than previous " + - "snapshot indices").isGreaterThan(ratisSnapshotIndex); + "snapshot indices"); } } From 8228e892ea52cf4625d408db3fc995bd5fcc4800 Mon Sep 17 00:00:00 2001 From: wangzhaohui Date: Fri, 22 Dec 2023 09:52:39 +0800 Subject: [PATCH 4/7] fix checkstyle --- .../java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 88821c6bdae9..1521b4c61497 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -850,7 +850,7 @@ public void testLookupKeyWithLocation() throws IOException { "/d=default-drack/127.0.0.1"); assertThat(keyPipeline.getNodes()) .containsAll(key4.getLatestVersionLocations() - .getLocationList().get(0).getPipeline().getNodesInOrder()); + .getLocationList().get(0).getPipeline().getNodesInOrder()); } @NotNull From b90b4b1ef794caa265a27bc211e5aabef75a446e Mon Sep 17 00:00:00 2001 From: wangzhaohui Date: Fri, 22 Dec 2023 19:44:19 +0800 Subject: [PATCH 5/7] improve assertInstanceOf --- .../org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java index fbe2aa7adf0d..f5abc5f2dbed 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java @@ -23,6 +23,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import java.io.File; import java.io.IOException; @@ -296,7 +297,7 @@ public void testPrepareWithMultipleThreads() throws Exception { .anyMatch((vol) -> 
vol.getName().equals(volumeName))); } catch (ExecutionException ex) { Throwable cause = ex.getCause(); - assertTrue(cause instanceof OMException); + assertInstanceOf(OMException.class, cause); assertEquals( NOT_SUPPORTED_OPERATION_WHEN_PREPARED, ((OMException) cause).getResult()); From 8491b60f1f38de2658148b4fd3fe0cdde90266d9 Mon Sep 17 00:00:00 2001 From: wangzhaohui Date: Fri, 22 Dec 2023 20:58:24 +0800 Subject: [PATCH 6/7] fix comments --- .../org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java index f5abc5f2dbed..1407b2dfcbc2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java @@ -297,10 +297,8 @@ public void testPrepareWithMultipleThreads() throws Exception { .anyMatch((vol) -> vol.getName().equals(volumeName))); } catch (ExecutionException ex) { Throwable cause = ex.getCause(); - assertInstanceOf(OMException.class, cause); - assertEquals( - NOT_SUPPORTED_OPERATION_WHEN_PREPARED, - ((OMException) cause).getResult()); + OMException cause = assertInstanceOf(OMException.class, cause); + assertEquals(NOT_SUPPORTED_OPERATION_WHEN_PREPARED, cause.getResult()); } } } From f31946275cf2c59a04c2d8fe907a01cf4da44f85 Mon Sep 17 00:00:00 2001 From: wangzhaohui Date: Fri, 22 Dec 2023 21:24:56 +0800 Subject: [PATCH 7/7] fix failed build --- .../org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java index 1407b2dfcbc2..427905512961 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java @@ -296,8 +296,7 @@ public void testPrepareWithMultipleThreads() throws Exception { .stream() .anyMatch((vol) -> vol.getName().equals(volumeName))); } catch (ExecutionException ex) { - Throwable cause = ex.getCause(); - OMException cause = assertInstanceOf(OMException.class, cause); + OMException cause = assertInstanceOf(OMException.class, ex.getCause()); assertEquals(NOT_SUPPORTED_OPERATION_WHEN_PREPARED, cause.getResult()); } }
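
Patch 6/7 switches the cause check to JUnit 5's assertInstanceOf but accidentally redeclares the local variable cause; patch 7/7 resolves that by inlining ex.getCause(). The resulting pattern is worth a standalone illustration: assertInstanceOf verifies the type with a descriptive failure message and returns the value already cast, so both the instanceof assertion and the explicit (OMException) cast disappear. A self-contained sketch under JUnit 5.8+ that mirrors the final hunk; IllegalStateException and its message stand in for OMException and NOT_SUPPORTED_OPERATION_WHEN_PREPARED, since the Ozone types are not needed to show the idiom:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertInstanceOf;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import org.junit.jupiter.api.Test;

    class AssertInstanceOfSketchTest {

      @Test
      void unwrapsExecutionExceptionCause() {
        // A future that has already failed, so get() throws immediately.
        CompletableFuture<Void> future = CompletableFuture.failedFuture(
            new IllegalStateException("NOT_SUPPORTED_OPERATION_WHEN_PREPARED"));

        ExecutionException ex = assertThrows(ExecutionException.class, future::get);

        // assertInstanceOf checks the type and returns the cause already cast,
        // so no separate assertTrue(cause instanceof ...) or explicit cast is needed.
        IllegalStateException cause =
            assertInstanceOf(IllegalStateException.class, ex.getCause());
        assertEquals("NOT_SUPPORTED_OPERATION_WHEN_PREPARED", cause.getMessage());
      }
    }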